advance.rs
// Copyright (c) 2025-2026 ACDC Network
// This file is part of the alphavm library.
//
// Alpha Chain | Delta Chain Protocol
// International Monetary Graphite.
//
// Derived from Aleo (https://aleo.org) and ProvableHQ (https://provable.com).
// They built world-class ZK infrastructure. We installed the EASY button.
// Their cryptography: elegant. Our modifications: bureaucracy-compatible.
// Original brilliance: theirs. Robert's Rules: ours. Bugs: definitely ours.
//
// Original Aleo/ProvableHQ code subject to Apache 2.0 https://www.apache.org/licenses/LICENSE-2.0
// All modifications and new work: CC0 1.0 Universal Public Domain Dedication.
// No rights reserved. No permission required. No warranty. No refunds.
//
// https://creativecommons.org/publicdomain/zero/1.0/
// SPDX-License-Identifier: CC0-1.0

use super::*;

use anyhow::Context;

impl<N: Network, C: ConsensusStorage<N>> Ledger<N, C> {
    /// Returns a candidate for the next block in the ledger, using a committed subdag and its transmissions.
    /// This candidate can then be passed to [`Ledger::advance_to_next_block`] to be added to the ledger.
    ///
    /// The transmissions are decoupled into ratifications, solutions, and transactions; ratifications
    /// from the memory pool are currently rejected. The resulting block is a quorum block carrying
    /// the given subdag.
    ///
    /// # Errors
    /// Returns an error if the transmissions cannot be decoupled, if any ratifications are present,
    /// or if the block template or quorum block cannot be constructed.
    ///
    /// # Panics
    /// This function panics if called from an async context.
    pub fn prepare_advance_to_next_quorum_block<R: Rng + CryptoRng>(
        &self,
        subdag: Subdag<N>,
        transmissions: IndexMap<TransmissionID<N>, Transmission<N>>,
        rng: &mut R,
    ) -> Result<Block<N>> {
        // Retrieve the latest block as the previous block (for the next block).
        let previous_block = self.latest_block();

        // Decouple the transmissions into ratifications, solutions, and transactions.
        let (ratifications, solutions, transactions) = decouple_transmissions(transmissions.into_iter())?;
        // Currently, we do not support ratifications from the memory pool.
        ensure!(ratifications.is_empty(), "Ratifications are currently unsupported from the memory pool");
        // Construct the block template (header plus the accepted/aborted content of the block).
        let (header, ratifications, solutions, aborted_solution_ids, transactions, aborted_transaction_ids) =
            self.construct_block_template(&previous_block, Some(&subdag), ratifications, solutions, transactions, rng)?;

        // Construct the new quorum block.
        Block::new_quorum(
            previous_block.hash(),
            header,
            subdag,
            ratifications,
            solutions,
            aborted_solution_ids,
            transactions,
            aborted_transaction_ids,
        )
    }

    /// Returns a candidate for the next block in the ledger.
    /// This candidate can then be passed to [`Ledger::advance_to_next_block`] to be added to the ledger.
    ///
    /// Note that beacon blocks are only used for testing purposes.
    /// Production code will most likely use [`Ledger::prepare_advance_to_next_quorum_block`] instead.
    ///
    /// # Errors
    /// Returns an error if any ratifications are present, or if the block template or beacon block
    /// cannot be constructed (the beacon block is signed with the given private key).
    ///
    /// # Panics
    /// This function panics if called from an async context.
    pub fn prepare_advance_to_next_beacon_block<R: Rng + CryptoRng>(
        &self,
        private_key: &PrivateKey<N>,
        candidate_ratifications: Vec<Ratify<N>>,
        candidate_solutions: Vec<Solution<N>>,
        candidate_transactions: Vec<Transaction<N>>,
        rng: &mut R,
    ) -> Result<Block<N>> {
        // Currently, we do not support ratifications from the memory pool.
        ensure!(candidate_ratifications.is_empty(), "Ratifications are currently unsupported from the memory pool");

        // Retrieve the latest block as the previous block (for the next block).
        let previous_block = self.latest_block();

        // Construct the block template. `None` for the subdag marks this as a non-quorum (beacon) template.
        let (header, ratifications, solutions, aborted_solution_ids, transactions, aborted_transaction_ids) = self
            .construct_block_template(
                &previous_block,
                None,
                candidate_ratifications,
                candidate_solutions,
                candidate_transactions,
                rng,
            )?;

        // Construct the new beacon block, signed by the given private key.
        Block::new_beacon(
            private_key,
            previous_block.hash(),
            header,
            ratifications,
            solutions,
            aborted_solution_ids,
            transactions,
            aborted_transaction_ids,
            rng,
        )
    }

    /// Adds the given block as the next block in the ledger.
    ///
    /// This function expects a valid block, that either was created by a trusted source, or successfully passed
    /// the blocks checks (e.g. [`Ledger::check_next_block`]).
    /// Note, that it is still possible that this function returns an error for a valid block, if there are concurrent tasks
    /// updating the ledger.
    ///
    /// On success this updates, in order: the VM, the cached current block, the cached committee,
    /// and the epoch hash / epoch prover cache.
    ///
    /// # Panics
    /// This function panics if called from an async context.
    pub fn advance_to_next_block(&self, block: &Block<N>) -> Result<()> {
        // Acquire the write lock on the current block.
        let mut current_block = self.current_block.write();
        // Check again for any possible race conditions.
        // (The height check is performed under the write lock so a concurrent advance cannot slip in between.)
        if current_block.is_genesis()? {
            // current block is initialized as the genesis block, but the ledger will
            // also advance to it on startup.
            ensure!(
                current_block.height() == block.height() || current_block.height() + 1 == block.height(),
                "The given block is not the direct successor of the latest block"
            );
        } else {
            ensure!(block.height() != 0, "Non-genesis blocks cannot have height 0");
            ensure!(
                current_block.height() + 1 == block.height(),
                "The given block is not the direct successor of the latest block"
            );
        }
        // Update the VM.
        self.vm.add_next_block(block).with_context(|| "Failed to add block to VM")?;
        // Update the current block.
        *current_block = block.clone();
        // Drop the write lock on the current block.
        drop(current_block);

        // Update the cached committee from storage.
        // NOTE(review): a storage read failure here is silently ignored and the stale cached
        // committee is kept — presumably intentional best-effort caching; confirm.
        if let Ok(current_committee) = self.vm.finalize_store().committee_store().current_committee() {
            *self.current_committee.write() = Some(current_committee);
        }

        // If the block is the start of a new epoch, or the epoch hash has not been set,
        // update the current epoch hash and clear the epoch prover cache.
        if block.height().is_multiple_of(N::NUM_BLOCKS_PER_EPOCH) || self.current_epoch_hash.read().is_none() {
            // Update and log the current epoch hash.
            // NOTE(review): `.ok()` discards the underlying error; only a generic message is logged below.
            match self.get_epoch_hash(block.height()).ok() {
                Some(epoch_hash) => {
                    trace!("Updating the current epoch hash at block {} to '{epoch_hash}'", block.height());
                    *self.current_epoch_hash.write() = Some(epoch_hash);
                }
                None => {
                    // A failed update is logged but does not fail the advance.
                    error!("Failed to update the current epoch hash at block {}", block.height());
                }
            }
            // Clear the epoch provers cache.
            self.epoch_provers_cache.write().clear();
        } else {
            // If the block is not part of a new epoch, add the new provers to the epoch prover cache.
            if let Some(solutions) = block.solutions().as_deref() {
                let mut epoch_provers_cache = self.epoch_provers_cache.write();
                // Count one entry per solution, keyed by the prover's address.
                for (_, s) in solutions.iter() {
                    let _ = *epoch_provers_cache.entry(s.address()).and_modify(|e| *e += 1).or_insert(1);
                }
            }
        }

        Ok(())
    }
}

/// Splits candidate solutions into a collection of accepted ones and aborted ones.
///
/// Candidates are verified in fixed-size chunks (processed in the original order) until
/// `max_solutions` have been accepted; all remaining candidates are aborted without being
/// verified. `verification_fn` receives a mutable reference so it may mutate the solution
/// as part of verification (e.g. `check_solution_mut`).
pub fn split_candidate_solutions<T, F>(
    mut candidate_solutions: Vec<T>,
    max_solutions: usize,
    mut verification_fn: F,
) -> (Vec<T>, Vec<T>)
where
    T: Sized + Copy,
    F: FnMut(&mut T) -> bool,
{
    // Separate the candidate solutions into valid and aborted solutions.
    let mut valid_candidate_solutions = Vec::with_capacity(max_solutions);
    let mut aborted_candidate_solutions = Vec::new();
    // Reverse the candidate solutions in order to be able to chunk them more efficiently.
    // (`split_off` from the tail is O(chunk) instead of shifting the whole vector.)
    candidate_solutions.reverse();
    // Verify the candidate solutions in chunks. This is done so that we can potentially
    // perform these operations in parallel while keeping the end result deterministic.
    let chunk_size = 16;
    while !candidate_solutions.is_empty() {
        // Check if the collection of valid solutions is full.
        if valid_candidate_solutions.len() >= max_solutions {
            // If that's the case, mark the rest of the candidates as aborted.
            // (`rev` restores the original candidate order.)
            aborted_candidate_solutions.extend(candidate_solutions.into_iter().rev());
            break;
        }

        // Split off a chunk of the candidate solutions.
        // Because the vector is reversed, the tail holds the earliest remaining candidates.
        let mut candidates_chunk = if candidate_solutions.len() > chunk_size {
            candidate_solutions.split_off(candidate_solutions.len() - chunk_size)
        } else {
            std::mem::take(&mut candidate_solutions)
        };

        // Verify the solutions in the chunk.
        // Note: this iterator is lazy — `verification_fn` runs as the results are consumed below.
        let verification_results = candidates_chunk.iter_mut().rev().map(|solution| {
            let verified = verification_fn(solution);
            (solution, verified)
        });

        // Process the results of the verification.
        for (solution, is_valid) in verification_results.into_iter() {
            if is_valid && valid_candidate_solutions.len() < max_solutions {
                valid_candidate_solutions.push(*solution);
            } else {
                aborted_candidate_solutions.push(*solution);
            }
        }
    }

    // The `aborted_candidate_solutions` can contain both verified and unverified solutions.
    // When `check_solution_mut` is used as `verification_fn`, these aborted solutions
    // may include both mutated and un-mutated variants. This occurs because the verification
    // check is skipped once the `max_solutions` limit is reached.
    //
    // This approach is SAFE because currently, only the `solutionID` of aborted solutions is stored.
    // However, if full aborted solutions need to be stored in the future, this logic will need to be revisited.
    (valid_candidate_solutions, aborted_candidate_solutions)
}

impl<N: Network, C: ConsensusStorage<N>> Ledger<N, C> {
    /// Constructs a block template for the next block in the ledger.
    ///
    /// Verifies and caps the candidate solutions, speculates over the ratifications/solutions/
    /// transactions via the VM, and assembles the header (metadata, roots) for either a quorum
    /// block (`subdag` is `Some`) or a beacon block (`subdag` is `None`).
    ///
    /// Returns `(header, ratifications, solutions, aborted_solution_ids, transactions, aborted_transaction_ids)`.
    ///
    /// # Panics
    /// This function panics if called from an async context.
    #[allow(clippy::type_complexity)]
    fn construct_block_template<R: Rng + CryptoRng>(
        &self,
        previous_block: &Block<N>,
        subdag: Option<&Subdag<N>>,
        candidate_ratifications: Vec<Ratify<N>>,
        candidate_solutions: Vec<Solution<N>>,
        candidate_transactions: Vec<Transaction<N>>,
        rng: &mut R,
    ) -> Result<(Header<N>, Ratifications<N>, Solutions<N>, Vec<SolutionID<N>>, Transactions<N>, Vec<N::TransactionID>)>
    {
        // Construct the solutions.
        let (solutions, aborted_solutions, solutions_root, combined_proof_target) = match candidate_solutions.is_empty()
        {
            // No candidates: empty solutions with a zero root and zero combined proof target.
            true => (None, vec![], Field::<N>::zero(), 0u128),
            false => {
                // Retrieve the latest epoch hash.
                let latest_epoch_hash = self.latest_epoch_hash()?;
                // Retrieve the latest proof target.
                let latest_proof_target = self.latest_proof_target();
                // Separate the candidate solutions into valid and aborted solutions.
                // Track per-prover acceptance counts to enforce the per-prover solution limit.
                let mut accepted_solutions: IndexMap<Address<N>, u64> = IndexMap::new();
                let (valid_candidate_solutions, aborted_candidate_solutions) =
                    split_candidate_solutions(candidate_solutions, N::MAX_SOLUTIONS, |solution| {
                        let prover_address = solution.address();
                        let num_accepted_solutions = accepted_solutions.get(&prover_address).copied().unwrap_or(0);
                        // Check if the prover has reached their solution limit.
                        if self.is_solution_limit_reached(&prover_address, num_accepted_solutions) {
                            return false;
                        }
                        // Check if the solution is valid and update the number of accepted solutions.
                        match self.puzzle().check_solution_mut(solution, latest_epoch_hash, latest_proof_target) {
                            // Increment the number of accepted solutions for the prover.
                            Ok(()) => {
                                *accepted_solutions.entry(prover_address).or_insert(0) += 1;
                                true
                            }
                            // The solution is invalid, so we do not increment the number of accepted solutions.
                            Err(_) => false,
                        }
                    });

                // Check if there are any valid solutions.
                match valid_candidate_solutions.is_empty() {
                    true => (None, aborted_candidate_solutions, Field::<N>::zero(), 0u128),
                    false => {
                        // Construct the solutions.
                        let solutions = PuzzleSolutions::new(valid_candidate_solutions)?;
                        // Compute the solutions root.
                        let solutions_root = solutions.to_accumulator_point()?;
                        // Compute the combined proof target.
                        let combined_proof_target = self.puzzle().get_combined_proof_target(&solutions)?;
                        // Output the solutions, solutions root, and combined proof target.
                        (Some(solutions), aborted_candidate_solutions, solutions_root, combined_proof_target)
                    }
                }
            }
        };
        // Prepare the solutions.
        let solutions = Solutions::from(solutions);

        // Construct the aborted solution IDs.
        let aborted_solution_ids = aborted_solutions.iter().map(Solution::id).collect::<Vec<_>>();

        // Retrieve the latest state root.
        let latest_state_root = self.latest_state_root();
        // Retrieve the latest cumulative weight.
        let latest_cumulative_weight = previous_block.cumulative_weight();

        // Compute the next round number.
        // Quorum blocks use the subdag's anchor round; beacon blocks simply increment the round.
        let next_round = match subdag {
            Some(subdag) => subdag.anchor_round(),
            None => previous_block.round().saturating_add(1),
        };
        // Compute the next height.
        let next_height = previous_block.height().saturating_add(1);
        // Determine the timestamp for the next block.
        let next_timestamp = match subdag {
            Some(subdag) => {
                // Retrieve the previous committee lookback.
                let previous_committee_lookback = {
                    // Calculate the penultimate round, which is the round before the anchor round.
                    let penultimate_round = subdag.anchor_round().saturating_sub(1);
                    // Output the committee lookback for the penultimate round.
                    self.get_committee_lookback_for_round(penultimate_round)?
                        .ok_or(anyhow!("Failed to fetch committee lookback for round {penultimate_round}"))?
                };
                // Return the timestamp for the given committee lookback.
                subdag.timestamp(&previous_committee_lookback)
            }
            // Beacon blocks use the local wall-clock time.
            None => OffsetDateTime::now_utc().unix_timestamp(),
        };

        // BFT consensus: Use genesis constants for all PoW-related metadata fields.
        // As of 2026-01-22, coinbase/PoW mining removed - targets stubbed to constants.
        let next_coinbase_target = N::GENESIS_COINBASE_TARGET;
        let next_proof_target = N::GENESIS_PROOF_TARGET;
        let next_cumulative_proof_target = 0u128; // No PoW accumulation
        // Cumulative weight still accrues the combined proof target of this block's solutions.
        let next_cumulative_weight = latest_cumulative_weight.saturating_add(combined_proof_target);
        let next_last_coinbase_target = N::GENESIS_COINBASE_TARGET;
        let next_last_coinbase_timestamp = N::GENESIS_TIMESTAMP;

        // Determine if the block timestamp should be included.
        // The timestamp is only part of the finalize state from consensus version V12 onward.
        let next_block_timestamp =
            (next_height >= N::CONSENSUS_HEIGHT(ConsensusVersion::V12).unwrap_or_default()).then_some(next_timestamp);
        // Construct the finalize state.
        let state = FinalizeGlobalState::new::<N>(
            next_round,
            next_height,
            next_block_timestamp,
            next_cumulative_weight,
            next_cumulative_proof_target,
            previous_block.hash(),
        )?;
        // Speculate over the ratifications, solutions, and transactions.
        // Note: As of 2026-01-22, coinbase rewards removed (BFT consensus only).
        let (ratifications, transactions, aborted_transaction_ids, ratified_finalize_operations) = self.vm.speculate(
            state,
            // Elapsed time since the previous block.
            next_timestamp.saturating_sub(previous_block.timestamp()),
            candidate_ratifications,
            &solutions,
            candidate_transactions.iter(),
            rng,
        )?;

        // Compute the ratifications root.
        let ratifications_root = ratifications.to_ratifications_root()?;

        // Construct the subdag root (zero for beacon blocks, which carry no subdag).
        let subdag_root = match subdag {
            Some(subdag) => subdag.to_subdag_root()?,
            None => Field::zero(),
        };

        // Construct the metadata.
        let metadata = Metadata::new(
            N::ID,
            next_round,
            next_height,
            next_cumulative_weight,
            next_cumulative_proof_target,
            next_coinbase_target,
            next_proof_target,
            next_last_coinbase_target,
            next_last_coinbase_timestamp,
            next_timestamp,
        )?;

        // Construct the header.
        let header = Header::from(
            latest_state_root,
            transactions.to_transactions_root()?,
            transactions.to_finalize_root(ratified_finalize_operations)?,
            ratifications_root,
            solutions_root,
            subdag_root,
            metadata,
        )?;

        // Return the block template.
        Ok((header, ratifications, solutions, aborted_solution_ids, transactions, aborted_transaction_ids))
    }
}