consensus.rs
1 /* This file is part of DarkFi (https://dark.fi) 2 * 3 * Copyright (C) 2020-2025 Dyne.org foundation 4 * 5 * This program is free software: you can redistribute it and/or modify 6 * it under the terms of the GNU Affero General Public License as 7 * published by the Free Software Foundation, either version 3 of the 8 * License, or (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU Affero General Public License for more details. 14 * 15 * You should have received a copy of the GNU Affero General Public License 16 * along with this program. If not, see <https://www.gnu.org/licenses/>. 17 */ 18 19 use std::collections::{HashMap, HashSet}; 20 21 use darkfi_sdk::{crypto::MerkleTree, monotree::Monotree, tx::TransactionHash}; 22 use darkfi_serial::{async_trait, SerialDecodable, SerialEncodable}; 23 use num_bigint::BigUint; 24 use sled_overlay::database::SledDbOverlayStateDiff; 25 use smol::lock::RwLock; 26 use tracing::{debug, info, warn}; 27 28 use crate::{ 29 blockchain::{ 30 block_store::{BlockDifficulty, BlockRanks}, 31 BlockInfo, Blockchain, BlockchainOverlay, BlockchainOverlayPtr, Header, HeaderHash, 32 }, 33 runtime::vm_runtime::GAS_LIMIT, 34 tx::{Transaction, MAX_TX_CALLS}, 35 validator::{ 36 pow::PoWModule, 37 utils::{best_fork_index, block_rank, find_extended_fork_index}, 38 verification::{verify_proposal, verify_transaction}, 39 }, 40 zk::VerifyingKey, 41 Error, Result, 42 }; 43 44 /// Gas limit for total block transactions(50 full transactions). 
pub const BLOCK_GAS_LIMIT: u64 = GAS_LIMIT * MAX_TX_CALLS as u64 * 50;

/// This struct represents the information required by the consensus algorithm
pub struct Consensus {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Fork size (length) after which it can be confirmed
    pub confirmation_threshold: usize,
    /// Fork chains containing block proposals
    pub forks: RwLock<Vec<Fork>>,
    /// Canonical blockchain PoW module state
    pub module: RwLock<PoWModule>,
    /// Lock to restrict when proposals appends can happen
    pub append_lock: RwLock<()>,
}

impl Consensus {
    /// Generate a new Consensus state.
    ///
    /// The PoW module is initialized over the provided canonical blockchain,
    /// using the given mining target and, optionally, a fixed difficulty.
    pub fn new(
        blockchain: Blockchain,
        confirmation_threshold: usize,
        pow_target: u32,
        pow_fixed_difficulty: Option<BigUint>,
    ) -> Result<Self> {
        // Start with no forks; they are created as proposals arrive
        let forks = RwLock::new(vec![]);
        let module = RwLock::new(PoWModule::new(
            blockchain.clone(),
            pow_target,
            pow_fixed_difficulty,
            None,
        )?);
        let append_lock = RwLock::new(());
        Ok(Self { blockchain, confirmation_threshold, forks, module, append_lock })
    }

    /// Generate a new empty fork.
    ///
    /// No-op if an empty fork (one with no proposals) already exists.
    /// The new fork snapshots the current canonical PoW module state.
    pub async fn generate_empty_fork(&self) -> Result<()> {
        debug!(target: "validator::consensus::generate_empty_fork", "Generating new empty fork...");
        let mut forks = self.forks.write().await;
        // Check if we already have an empty fork
        for fork in forks.iter() {
            if fork.proposals.is_empty() {
                debug!(target: "validator::consensus::generate_empty_fork", "An empty fork already exists.");
                drop(forks);
                return Ok(())
            }
        }
        // Clone the current PoW module state into the new fork
        let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        forks.push(fork);
        drop(forks);
        debug!(target: "validator::consensus::generate_empty_fork", "Fork generated!");
        Ok(())
    }

    /// Given a proposal, the node verifies it and finds which fork it extends.
    /// If the proposal extends the canonical blockchain, a new fork chain is created.
    pub async fn append_proposal(&self, proposal: &Proposal, verify_fees: bool) -> Result<()> {
        debug!(target: "validator::consensus::append_proposal", "Appending proposal {}", proposal.hash);

        // Check if proposal already exists in any fork's proposals sequence.
        // Iterate each sequence in reverse, since a duplicate is most likely
        // to be a recently appended proposal near the tip.
        let lock = self.forks.read().await;
        for fork in lock.iter() {
            for p in fork.proposals.iter().rev() {
                if p == &proposal.hash {
                    drop(lock);
                    debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                    return Err(Error::ProposalAlreadyExists)
                }
            }
        }
        // Check if proposal is canonical, i.e. already confirmed at its height.
        // NOTE(review): `canonical_headers[0].unwrap()` assumes a successful
        // strict `get_order` lookup always yields `Some` — confirm against the
        // `get_order` contract.
        if let Ok(canonical_headers) =
            self.blockchain.blocks.get_order(&[proposal.block.header.height], true)
        {
            if canonical_headers[0].unwrap() == proposal.hash {
                drop(lock);
                debug!(target: "validator::consensus::append_proposal", "Proposal {} already exists", proposal.hash);
                return Err(Error::ProposalAlreadyExists)
            }
        }
        drop(lock);

        // Verify proposal and grab corresponding fork (a full clone), plus the
        // index of the original fork it extends, if any
        let (mut fork, index) = verify_proposal(self, proposal, verify_fees).await?;

        // Append proposal to the fork clone
        fork.append_proposal(proposal).await?;

        // TODO: to keep memory usage low, we should only append forks that
        // are higher ranking than our current best one

        // If a fork index was found, replace forks with the mutated one,
        // otherwise push the new fork. The extra equality check guards
        // against the forks set having changed between releasing the read
        // lock above and acquiring this write lock: we only replace in place
        // if the fork at that index is still the one we extended (its
        // proposals must equal ours minus the newly appended hash).
        let mut lock = self.forks.write().await;
        match index {
            Some(i) => {
                if i < lock.len() && lock[i].proposals == fork.proposals[..fork.proposals.len() - 1]
                {
                    lock[i] = fork;
                } else {
                    lock.push(fork);
                }
            }
            None => {
                lock.push(fork);
            }
        }
        drop(lock);

        info!(target: "validator::consensus::append_proposal", "Appended proposal {}", proposal.hash);

        Ok(())
    }

    /// Given a proposal, find the fork chain it extends, and return its full clone.
    /// If the proposal extends the fork not on its tail, a new fork is created and
    /// we re-apply the proposals up to the extending one. If proposal extends canonical,
    /// a new fork is created. Additionally, we return the fork index if a new fork
    /// was not created, so caller can replace the fork.
    pub async fn find_extended_fork(&self, proposal: &Proposal) -> Result<(Fork, Option<usize>)> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Check if proposal extends any fork
        let found = find_extended_fork_index(&forks, proposal);
        if found.is_err() {
            // A duplicate is a hard error; any other lookup error means the
            // proposal doesn't extend a fork, so try canonical instead.
            if let Err(Error::ProposalAlreadyExists) = found {
                return Err(Error::ProposalAlreadyExists)
            }

            // Check if proposal extends canonical: it must reference our last
            // block and carry a strictly greater height.
            let (last_height, last_block) = self.blockchain.last()?;
            if proposal.block.header.previous != last_block ||
                proposal.block.header.height <= last_height
            {
                return Err(Error::ExtendedChainIndexNotFound)
            }

            // Check if we have an empty fork to use, so we can reuse its overlay
            for (f_index, fork) in forks.iter().enumerate() {
                if fork.proposals.is_empty() {
                    return Ok((forks[f_index].full_clone()?, Some(f_index)))
                }
            }

            // Generate a new fork extending canonical
            let fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
            return Ok((fork, None))
        }

        let (f_index, p_index) = found.unwrap();
        let original_fork = &forks[f_index];
        // Check if proposal extends fork at last proposal; if so, the caller
        // can mutate a full clone and replace the fork at `f_index` in place.
        if p_index == (original_fork.proposals.len() - 1) {
            return Ok((original_fork.full_clone()?, Some(f_index)))
        }

        // Rebuild fork: start from canonical and re-apply the original fork's
        // proposals up to and including the extended one.
        let mut fork = Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?;
        fork.proposals = original_fork.proposals[..p_index + 1].to_vec();
        fork.diffs = original_fork.diffs[..p_index + 1].to_vec();

        // Retrieve proposals blocks from original fork
        let blocks =
            &original_fork.overlay.lock().unwrap().get_blocks_by_hash(&fork.proposals)?;
        for (index, block) in blocks.iter().enumerate() {
            // Apply block diffs onto the fresh overlay
            fork.overlay.lock().unwrap().overlay.lock().unwrap().add_diff(&fork.diffs[index])?;

            // Grab next mine target and difficulty
            let (next_target, next_difficulty) = fork.module.next_mine_target_and_difficulty()?;

            // Calculate block rank
            let (target_distance_sq, hash_distance_sq) = block_rank(block, &next_target);

            // Update PoW module
            fork.module.append(block.header.timestamp, &next_difficulty);

            // Update cached fork ranks
            fork.targets_rank += target_distance_sq;
            fork.hashes_rank += hash_distance_sq;
        }

        // Rebuild fork contracts states monotree
        fork.compute_monotree()?;

        // Drop forks lock
        drop(forks);

        Ok((fork, None))
    }

    /// Check if best fork proposals can be confirmed.
    /// Consensus confirmation logic:
    /// - If the current best fork has reached greater length than the security threshold,
    ///   and no other fork exists with same rank, first proposal(s) in that fork can be
    ///   appended to canonical blockchain (confirmed).
    ///
    /// When best fork can be confirmed, first block(s) should be appended to canonical,
    /// and forks should be rebuilt.
244 pub async fn confirmation(&self) -> Result<Option<usize>> { 245 debug!(target: "validator::consensus::confirmation", "Started confirmation check"); 246 247 // Grab best fork 248 let forks = self.forks.read().await; 249 let index = best_fork_index(&forks)?; 250 let fork = &forks[index]; 251 252 // Check its length 253 let length = fork.proposals.len(); 254 if length < self.confirmation_threshold { 255 debug!(target: "validator::consensus::confirmation", "Nothing to confirme yet, best fork size: {length}"); 256 drop(forks); 257 return Ok(None) 258 } 259 260 // Drop forks lock 261 drop(forks); 262 263 Ok(Some(index)) 264 } 265 266 /// Auxiliary function to retrieve the fork header hash of provided height. 267 /// The fork is identified by the provided header hash. 268 pub async fn get_fork_header_hash( 269 &self, 270 height: u32, 271 fork_header: &HeaderHash, 272 ) -> Result<Option<HeaderHash>> { 273 // Grab a lock over current forks 274 let forks = self.forks.read().await; 275 276 // Find the fork containing the provided header 277 let mut found = None; 278 'outer: for (index, fork) in forks.iter().enumerate() { 279 for p in fork.proposals.iter().rev() { 280 if p == fork_header { 281 found = Some(index); 282 break 'outer 283 } 284 } 285 } 286 if found.is_none() { 287 drop(forks); 288 return Ok(None) 289 } 290 let index = found.unwrap(); 291 292 // Grab header if it exists 293 let header = forks[index].overlay.lock().unwrap().blocks.get_order(&[height], false)?[0]; 294 295 // Drop forks lock 296 drop(forks); 297 298 Ok(header) 299 } 300 301 /// Auxiliary function to retrieve the fork headers of provided hashes. 302 /// The fork is identified by the provided header hash. If fork doesn't 303 /// exists, an empty vector is returned. 
304 pub async fn get_fork_headers( 305 &self, 306 headers: &[HeaderHash], 307 fork_header: &HeaderHash, 308 ) -> Result<Vec<Header>> { 309 // Grab a lock over current forks 310 let forks = self.forks.read().await; 311 312 // Find the fork containing the provided header 313 let mut found = None; 314 'outer: for (index, fork) in forks.iter().enumerate() { 315 for p in fork.proposals.iter().rev() { 316 if p == fork_header { 317 found = Some(index); 318 break 'outer 319 } 320 } 321 } 322 let Some(index) = found else { 323 drop(forks); 324 return Ok(vec![]) 325 }; 326 327 // Grab headers 328 let headers = forks[index].overlay.lock().unwrap().get_headers_by_hash(headers)?; 329 330 // Drop forks lock 331 drop(forks); 332 333 Ok(headers) 334 } 335 336 /// Auxiliary function to retrieve the fork proposals of provided hashes. 337 /// The fork is identified by the provided header hash. If fork doesn't 338 /// exists, an empty vector is returned. 339 pub async fn get_fork_proposals( 340 &self, 341 headers: &[HeaderHash], 342 fork_header: &HeaderHash, 343 ) -> Result<Vec<Proposal>> { 344 // Grab a lock over current forks 345 let forks = self.forks.read().await; 346 347 // Find the fork containing the provided header 348 let mut found = None; 349 'outer: for (index, fork) in forks.iter().enumerate() { 350 for p in fork.proposals.iter().rev() { 351 if p == fork_header { 352 found = Some(index); 353 break 'outer 354 } 355 } 356 } 357 let Some(index) = found else { 358 drop(forks); 359 return Ok(vec![]) 360 }; 361 362 // Grab proposals 363 let blocks = forks[index].overlay.lock().unwrap().get_blocks_by_hash(headers)?; 364 let mut proposals = Vec::with_capacity(blocks.len()); 365 for block in blocks { 366 proposals.push(Proposal::new(block)); 367 } 368 369 // Drop forks lock 370 drop(forks); 371 372 Ok(proposals) 373 } 374 375 /// Auxiliary function to retrieve a fork proposals, starting from provided tip. 
    /// If provided tip is too far behind, unknown, or fork doesn't exist, an empty
    /// vector is returned. The fork is identified by the optional provided header hash.
    /// If it's `None`, we use our best fork.
    pub async fn get_fork_proposals_after(
        &self,
        tip: HeaderHash,
        fork_tip: Option<HeaderHash>,
        limit: u32,
    ) -> Result<Vec<Proposal>> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Create return vector
        let mut proposals = vec![];

        // Grab fork index to use: either the fork containing `fork_tip`,
        // or our current best fork
        let index = match fork_tip {
            Some(fork_tip) => {
                let mut found = None;
                'outer: for (index, fork) in forks.iter().enumerate() {
                    for p in fork.proposals.iter().rev() {
                        if p == &fork_tip {
                            found = Some(index);
                            break 'outer
                        }
                    }
                }
                if found.is_none() {
                    drop(forks);
                    return Ok(proposals)
                }
                found.unwrap()
            }
            None => best_fork_index(&forks)?,
        };

        // Check tip exists in the fork's overlay (canonical + fork proposals)
        let Ok(existing_tips) = forks[index].overlay.lock().unwrap().get_blocks_by_hash(&[tip])
        else {
            drop(forks);
            return Ok(proposals)
        };

        // Check tip is not further behind the fork's last block than `limit`.
        // NOTE(review): this `u32` subtraction assumes the requested tip's
        // height never exceeds the fork's last block height — otherwise it
        // would underflow (panic in debug builds). Presumably guaranteed by
        // the successful overlay lookup above; confirm.
        let last_block_height = forks[index].overlay.lock().unwrap().last()?.0;
        if last_block_height - existing_tips[0].header.height >= limit {
            drop(forks);
            return Ok(proposals)
        }

        // Retrieve all proposals after requested one: first the canonical
        // blocks after the tip's height, then every proposal in the fork
        let headers = self.blockchain.blocks.get_all_after(existing_tips[0].header.height)?;
        let blocks = self.blockchain.get_blocks_by_hash(&headers)?;
        for block in blocks {
            proposals.push(Proposal::new(block));
        }
        let blocks =
            forks[index].overlay.lock().unwrap().get_blocks_by_hash(&forks[index].proposals)?;
        for block in blocks {
            proposals.push(Proposal::new(block));
        }

        // Drop forks lock
        drop(forks);

        Ok(proposals)
    }

    /// Auxiliary function to retrieve current best fork last header.
    /// If no forks exist, grab the last header from canonical.
    pub async fn best_fork_last_header(&self) -> Result<(u32, HeaderHash)> {
        // Grab a lock over current forks
        let forks = self.forks.read().await;

        // Check if node has any forks
        if forks.is_empty() {
            drop(forks);
            return self.blockchain.last()
        }

        // Grab best fork
        let fork = &forks[best_fork_index(&forks)?];

        // Grab its last header (falls back to canonical if the fork is empty)
        let last = fork.last_proposal()?;
        drop(forks);
        Ok((last.block.header.height, last.hash))
    }

    /// Auxiliary function to purge current forks and reset the ones starting
    /// with the provided prefix, excluding provided confirmed fork.
    /// Additionally, remove confirmed transactions from the forks mempools,
    /// along with the unproposed transactions sled trees.
    /// This function assumes that the prefix blocks have already been appended
    /// to canonical chain from the confirmed fork.
    pub async fn reset_forks(
        &self,
        prefix: &[HeaderHash],
        confirmed_fork_index: &usize,
        confirmed_txs: &[Transaction],
    ) -> Result<()> {
        // Grab a lock over current forks
        let mut forks = self.forks.write().await;

        // Find all the forks that start with the provided prefix,
        // excluding confirmed fork index, and remove their prefixed
        // proposals, and their corresponding diffs.
        // If the fork is not starting with the provided prefix,
        // drop it. Additionally, keep track of all the referenced
        // trees in overlays that are valid.
        // NOTE(review): assumes `prefix` is non-empty — `prefix.last().unwrap()`
        // panics otherwise; confirm callers always pass confirmed block hashes.
        let excess = prefix.len();
        let prefix_last_index = excess - 1;
        let prefix_last = prefix.last().unwrap();
        // keep[i] == false marks fork i for removal at the end
        let mut keep = vec![true; forks.len()];
        let mut referenced_trees = HashSet::new();
        let mut referenced_txs = HashSet::new();
        let confirmed_txs_hashes: Vec<TransactionHash> =
            confirmed_txs.iter().map(|tx| tx.hash()).collect();
        for (index, fork) in forks.iter_mut().enumerate() {
            if &index == confirmed_fork_index {
                // The confirmed fork is kept as-is: only record its tree
                // references so they are not dropped from the database below
                let fork_overlay = fork.overlay.lock().unwrap();
                let overlay = fork_overlay.overlay.lock().unwrap();
                for tree in &overlay.state.initial_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in &overlay.state.new_tree_names {
                    referenced_trees.insert(tree.clone());
                }
                for tree in overlay.state.dropped_trees.keys() {
                    referenced_trees.insert(tree.clone());
                }
                // Remove confirmed proposals txs from fork's mempool
                fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
                // Store its txs references
                for tx in &fork.mempool {
                    referenced_txs.insert(*tx);
                }
                drop(overlay);
                drop(fork_overlay);
                continue
            }

            // A surviving fork must contain the full confirmed prefix
            if fork.proposals.is_empty() ||
                prefix_last_index >= fork.proposals.len() ||
                &fork.proposals[prefix_last_index] != prefix_last
            {
                keep[index] = false;
                continue
            }

            // Remove confirmed proposals txs from fork's mempool
            fork.mempool.retain(|tx| !confirmed_txs_hashes.contains(tx));
            // Store its txs references
            for tx in &fork.mempool {
                referenced_txs.insert(*tx);
            }

            // Remove the committed differences: keep only the proposals and
            // diffs after the confirmed prefix, and detach the prefix diffs
            // from the fork's overlay
            let rest_proposals = fork.proposals.split_off(excess);
            let rest_diffs = fork.diffs.split_off(excess);
            let mut diffs = fork.diffs.clone();
            fork.proposals = rest_proposals;
            fork.diffs = rest_diffs;
            for diff in diffs.iter_mut() {
                fork.overlay.lock().unwrap().overlay.lock().unwrap().remove_diff(diff);
            }

            // Store its tree references
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in &overlay.state.new_tree_names {
                referenced_trees.insert(tree.clone());
            }
            for tree in overlay.state.dropped_trees.keys() {
                referenced_trees.insert(tree.clone());
            }
            drop(overlay);
            drop(fork_overlay);
        }

        // Find the trees and pending txs that are no longer referenced by valid forks
        let mut dropped_trees = HashSet::new();
        let mut dropped_txs = HashSet::new();
        for (index, fork) in forks.iter_mut().enumerate() {
            if keep[index] {
                continue
            }
            for tx in &fork.mempool {
                if !referenced_txs.contains(tx) {
                    dropped_txs.insert(*tx);
                }
            }
            let fork_overlay = fork.overlay.lock().unwrap();
            let overlay = fork_overlay.overlay.lock().unwrap();
            for tree in &overlay.state.initial_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in &overlay.state.new_tree_names {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            for tree in overlay.state.dropped_trees.keys() {
                if !referenced_trees.contains(tree) {
                    dropped_trees.insert(tree.clone());
                }
            }
            drop(overlay);
            drop(fork_overlay);
        }

        // Drop unreferenced trees from the database
        for tree in dropped_trees {
            self.blockchain.sled_db.drop_tree(tree)?;
        }

        // Drop invalid forks, preserving order of the kept ones
        let mut iter = keep.iter();
        forks.retain(|_| *iter.next().unwrap());

        // Remove confirmed proposals txs from the unproposed txs sled tree
        self.blockchain.remove_pending_txs_hashes(&confirmed_txs_hashes)?;

        // Remove unreferenced txs from the unproposed txs sled tree
        self.blockchain.remove_pending_txs_hashes(&Vec::from_iter(dropped_txs))?;

        // Drop forks lock
        drop(forks);

        Ok(())
    }

    /// Auxiliary function to fully purge current forks and leave only a new empty fork.
    pub async fn purge_forks(&self) -> Result<()> {
        debug!(target: "validator::consensus::purge_forks", "Purging current forks...");
        let mut forks = self.forks.write().await;
        *forks = vec![Fork::new(self.blockchain.clone(), self.module.read().await.clone()).await?];
        drop(forks);
        debug!(target: "validator::consensus::purge_forks", "Forks purged!");
        Ok(())
    }

    /// Auxiliary function to reset PoW module, rebuilding it from canonical
    /// while keeping the configured target and fixed difficulty.
    pub async fn reset_pow_module(&self) -> Result<()> {
        debug!(target: "validator::consensus::reset_pow_module", "Resetting PoW module...");
        let mut module = self.module.write().await;
        *module = PoWModule::new(
            self.blockchain.clone(),
            module.target,
            module.fixed_difficulty.clone(),
            None,
        )?;
        drop(module);
        debug!(target: "validator::consensus::reset_pow_module", "PoW module reset successfully!");
        Ok(())
    }

    /// Auxiliary function to check current contracts states checksums
    /// Monotree(SMT) validity in all active forks and canonical.
    pub async fn healthcheck(&self) -> Result<()> {
        // Grab a lock over current forks, so they can't mutate mid-check
        let lock = self.forks.read().await;

        // Rebuild current canonical contract states checksums monotree
        let state_monotree = self.blockchain.get_state_monotree()?;

        // Check that the root matches last block header state root
        let Some(state_root) = state_monotree.get_headroot()? else {
            return Err(Error::ContractsStatesRootNotFoundError);
        };
        let last_block_state_root = self.blockchain.last_header()?.state_root;
        if state_root != last_block_state_root {
            return Err(Error::ContractsStatesRootError(
                blake3::Hash::from_bytes(state_root).to_string(),
                blake3::Hash::from_bytes(last_block_state_root).to_string(),
            ));
        }

        // Check each fork health
        for fork in lock.iter() {
            fork.healthcheck()?;
        }

        Ok(())
    }
}

/// This struct represents a block proposal, used for consensus.
#[derive(Debug, Clone, SerialEncodable, SerialDecodable)]
pub struct Proposal {
    /// Block hash
    pub hash: HeaderHash,
    /// Block data
    pub block: BlockInfo,
}

impl Proposal {
    /// Generate a new `Proposal` for given block, caching its hash.
    pub fn new(block: BlockInfo) -> Self {
        let hash = block.hash();
        Self { hash, block }
    }
}

impl From<Proposal> for BlockInfo {
    fn from(proposal: Proposal) -> BlockInfo {
        proposal.block
    }
}

/// Struct representing a forked blockchain state.
///
/// An overlay over the original blockchain is used, containing all pending to-write
/// records. Additionally, each fork keeps a vector of valid pending transactions hashes,
/// in order of receipt, and the proposals hashes sequence, for validations.
#[derive(Clone)]
pub struct Fork {
    /// Canonical (confirmed) blockchain
    pub blockchain: Blockchain,
    /// Overlay cache over canonical Blockchain
    pub overlay: BlockchainOverlayPtr,
    /// Current PoW module state
    pub module: PoWModule,
    /// Current contracts states checksums Monotree(SMT)
    pub state_monotree: Monotree,
    /// Fork proposal hashes sequence
    pub proposals: Vec<HeaderHash>,
    /// Fork proposal overlay diffs sequence
    pub diffs: Vec<SledDbOverlayStateDiff>,
    /// Valid pending transaction hashes
    pub mempool: Vec<TransactionHash>,
    /// Current fork mining targets rank, cached for better performance
    pub targets_rank: BigUint,
    /// Current fork hashes rank, cached for better performance
    pub hashes_rank: BigUint,
}

impl Fork {
    /// Generate a new fork over the provided canonical blockchain,
    /// seeding its mempool, overlay, states monotree and cached ranks
    /// from the current canonical state.
    pub async fn new(blockchain: Blockchain, module: PoWModule) -> Result<Self> {
        // Seed the mempool with all currently pending transactions
        let mempool = blockchain.get_pending_txs()?.iter().map(|tx| tx.hash()).collect();
        let overlay = BlockchainOverlay::new(&blockchain)?;
        // Build current contract states checksums monotree
        let state_monotree = overlay.lock().unwrap().get_state_monotree()?;
        // Retrieve last block difficulty to access current ranks
        let last_difficulty = blockchain.last_block_difficulty()?;
        let targets_rank = last_difficulty.ranks.targets_rank;
        let hashes_rank = last_difficulty.ranks.hashes_rank;
        Ok(Self {
            blockchain,
            overlay,
            module,
            state_monotree,
            proposals: vec![],
            diffs: vec![],
            mempool,
            targets_rank,
            hashes_rank,
        })
    }

    /// Auxiliary function to append a proposal and update current fork rank.
    pub async fn append_proposal(&mut self, proposal: &Proposal) -> Result<()> {
        // Grab next mine target and difficulty
        let (next_target, next_difficulty) = self.module.next_mine_target_and_difficulty()?;

        // Calculate block rank against the expected target
        let (target_distance_sq, hash_distance_sq) = block_rank(&proposal.block, &next_target);

        // Update cached fork ranks (clones needed: the distances are moved
        // into `BlockRanks` below)
        self.targets_rank += target_distance_sq.clone();
        self.hashes_rank += hash_distance_sq.clone();

        // Generate block difficulty and update PoW module
        let cumulative_difficulty =
            self.module.cumulative_difficulty.clone() + next_difficulty.clone();
        let ranks = BlockRanks::new(
            target_distance_sq,
            self.targets_rank.clone(),
            hash_distance_sq,
            self.hashes_rank.clone(),
        );
        let block_difficulty = BlockDifficulty::new(
            proposal.block.header.height,
            proposal.block.header.timestamp,
            next_difficulty,
            cumulative_difficulty,
            ranks,
        );
        self.module.append_difficulty(&self.overlay, block_difficulty)?;

        // Push proposal's hash
        self.proposals.push(proposal.hash);

        // Push proposal overlay diff, computed relative to the already
        // recorded diffs sequence
        self.diffs.push(self.overlay.lock().unwrap().overlay.lock().unwrap().diff(&self.diffs)?);

        Ok(())
    }

    /// Auxiliary function to retrieve last proposal.
    /// Falls back to the overlay's last block when the fork has no proposals.
    pub fn last_proposal(&self) -> Result<Proposal> {
        let block = if let Some(last) = self.proposals.last() {
            self.overlay.lock().unwrap().get_blocks_by_hash(&[*last])?[0].clone()
        } else {
            self.overlay.lock().unwrap().last_block()?
        };

        Ok(Proposal::new(block))
    }

    /// Auxiliary function to compute forks' next block height.
    pub fn get_next_block_height(&self) -> Result<u32> {
        let proposal = self.last_proposal()?;
        Ok(proposal.block.header.height + 1)
    }

    /// Auxiliary function to retrieve unproposed valid transactions,
    /// along with their total gas used, total paid fees and the overlay
    /// used to verify the transactions for further processing.
    ///
    /// Note: Always remember to purge new trees from the overlay if not needed.
    pub async fn unproposed_txs(
        &self,
        blockchain: &Blockchain,
        verifying_block_height: u32,
        block_target: u32,
        verify_fees: bool,
    ) -> Result<(Vec<Transaction>, u64, u64, BlockchainOverlayPtr)> {
        // Clone forks' overlay so verification doesn't mutate fork state
        let overlay = self.overlay.lock().unwrap().full_clone()?;

        // Check if our mempool is not empty
        if self.mempool.is_empty() {
            return Ok((vec![], 0, 0, overlay))
        }

        // Transactions Merkle tree
        let mut tree = MerkleTree::new(1);

        // Total gas accumulators
        let mut total_gas_used = 0;
        let mut total_gas_paid = 0;

        // Map of ZK proof verifying keys for the current transaction batch,
        // keyed by contract id bytes
        let mut vks: HashMap<[u8; 32], HashMap<String, VerifyingKey>> = HashMap::new();

        // Grab all current proposals transactions hashes, so already
        // proposed transactions are skipped
        let proposals_txs = overlay.lock().unwrap().get_blocks_txs_hashes(&self.proposals)?;

        // Iterate through all pending transactions in the forks' mempool
        let mut unproposed_txs = vec![];
        for tx in &self.mempool {
            // If the hash is contained in the proposals transactions vec, skip it
            if proposals_txs.contains(tx) {
                continue
            }

            // Retrieve the actual unproposed transaction
            let unproposed_tx =
                blockchain.transactions.get_pending(&[*tx], true)?[0].clone().unwrap();

            // Update the verifying keys map
            for call in &unproposed_tx.calls {
                vks.entry(call.data.contract_id.to_bytes()).or_default();
            }

            // Verify the transaction against current state; checkpoint first
            // so a failed verification can be rolled back cleanly
            overlay.lock().unwrap().checkpoint();
            let gas_data = match verify_transaction(
                &overlay,
                verifying_block_height,
                block_target,
                &unproposed_tx,
                &mut tree,
                &mut vks,
                verify_fees,
            )
            .await
            {
                Ok(gas_values) => gas_values,
                Err(e) => {
                    // Invalid transaction: revert its state changes and move on
                    debug!(target: "validator::consensus::unproposed_txs", "Transaction verification failed: {e}");
                    overlay.lock().unwrap().revert_to_checkpoint()?;
                    continue
                }
            };

            // Store the gas used by the verified transaction
            let tx_gas_used = gas_data.total_gas_used();

            // Calculate current accumulated gas usage
            let accumulated_gas_usage = total_gas_used + tx_gas_used;

            // Check gas limit - if accumulated gas used exceeds it, break out of loop.
            // NOTE(review): this is greedy — a later, smaller transaction that
            // would still fit under the limit is not considered.
            if accumulated_gas_usage > BLOCK_GAS_LIMIT {
                warn!(
                    target: "validator::consensus::unproposed_txs",
                    "Retrieving transaction {tx} would exceed configured unproposed transaction gas limit: {accumulated_gas_usage} - {BLOCK_GAS_LIMIT}"
                );
                overlay.lock().unwrap().revert_to_checkpoint()?;
                break
            }

            // Update accumulated total gas
            total_gas_used += tx_gas_used;
            total_gas_paid += gas_data.paid;

            // Push the tx hash into the unproposed transactions vector
            unproposed_txs.push(unproposed_tx);
        }

        Ok((unproposed_txs, total_gas_used, total_gas_paid, overlay))
    }

    /// Auxiliary function to create a full clone using BlockchainOverlay::full_clone.
    /// Changes to this copy don't affect original fork overlay records, since underlying
    /// overlay pointer has been updated to the cloned one.
898 pub fn full_clone(&self) -> Result<Self> { 899 let blockchain = self.blockchain.clone(); 900 let overlay = self.overlay.lock().unwrap().full_clone()?; 901 let module = self.module.clone(); 902 let state_monotree = self.state_monotree.clone(); 903 let proposals = self.proposals.clone(); 904 let diffs = self.diffs.clone(); 905 let mempool = self.mempool.clone(); 906 let targets_rank = self.targets_rank.clone(); 907 let hashes_rank = self.hashes_rank.clone(); 908 909 Ok(Self { 910 blockchain, 911 overlay, 912 module, 913 state_monotree, 914 proposals, 915 diffs, 916 mempool, 917 targets_rank, 918 hashes_rank, 919 }) 920 } 921 922 /// Build current contract states checksums monotree. 923 pub fn compute_monotree(&mut self) -> Result<()> { 924 self.state_monotree = self.overlay.lock().unwrap().get_state_monotree()?; 925 Ok(()) 926 } 927 928 /// Auxiliary function to check current contracts states checksums 929 /// Monotree(SMT) validity. 930 /// 931 /// Note: This should be executed on fresh forks and/or when 932 /// a fork doesn't contain changes over the last appended 933 // proposal. 934 pub fn healthcheck(&self) -> Result<()> { 935 // Rebuild current contract states checksums monotree 936 let state_monotree = self.overlay.lock().unwrap().get_state_monotree()?; 937 938 // Check that it matches forks' tree 939 let Some(state_root) = state_monotree.get_headroot()? else { 940 return Err(Error::ContractsStatesRootNotFoundError); 941 }; 942 let Some(fork_state_root) = self.state_monotree.get_headroot()? 
else { 943 return Err(Error::ContractsStatesRootNotFoundError); 944 }; 945 if state_root != fork_state_root { 946 return Err(Error::ContractsStatesRootError( 947 blake3::Hash::from_bytes(state_root).to_string(), 948 blake3::Hash::from_bytes(fork_state_root).to_string(), 949 )); 950 } 951 952 // Check that the root matches last block header state root 953 let last_block_state_root = self.last_proposal()?.block.header.state_root; 954 if state_root != last_block_state_root { 955 return Err(Error::ContractsStatesRootError( 956 blake3::Hash::from_bytes(state_root).to_string(), 957 blake3::Hash::from_bytes(last_block_state_root).to_string(), 958 )); 959 } 960 961 Ok(()) 962 } 963 }