From eb974d1c13ceb12aad64dea5d04acdd1de7be450 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 11 Apr 2025 01:15:21 +0800 Subject: [PATCH 01/21] feat(WIP): Optimize block indexing with BlockId, improve table structure and error handling Signed-off-by: will-bitlightlabs --- src/blocks.rs | 205 ++++++++++++++++++++++++++++++++++++++++++++++++-- src/db.rs | 90 +++++++++++++++++++++- 2 files changed, 285 insertions(+), 10 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index b0d91a9..f9d443a 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -25,8 +25,8 @@ use std::collections::HashSet; -use amplify::{ByteArray, FromSliceError}; use bprpc::BloomFilter32; +use amplify::{ByteArray, FromSliceError, hex}; use bpwallet::{Block, BlockHash}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; @@ -34,7 +34,9 @@ use redb::{CommitError, ReadableTable, StorageError, TableError}; use crate::ImporterMsg; use crate::db::{ - DbBlockHeader, DbMsg, DbTx, REC_TXNO, TABLE_BLKS, TABLE_MAIN, TABLE_TXES, TABLE_TXIDS, TxNo, + BlockId, DbBlockHeader, DbMsg, DbTx, REC_BLOCKID, REC_CHAIN, REC_ORPHANS, REC_TXNO, TABLE_BLKS, + TABLE_BLOCK_SPENDS, TABLE_BLOCKIDS, TABLE_CHAIN, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, + TABLE_OUTS, TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, }; const NAME: &str = "blockproc"; @@ -61,6 +63,7 @@ impl BlockProcessor { self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; + // Get current transaction number let mut txno = { let main = db .open_table(TABLE_MAIN) @@ -72,8 +75,31 @@ impl BlockProcessor { TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)? }; + // Get or create the next block ID + let mut blockid = { + let main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + match main + .get(REC_BLOCKID) + .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? 
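`TABLE_MAIN` stores its records as untyped byte slices, so both counters round-trip through fixed-width big-endian encodings: five bytes for `TxNo`, four for the new `BlockId`. A minimal, std-only sketch of that round-trip (a plain `HashMap` stands in for the redb table; the `BlockId` shown mirrors the type this patch adds in `src/db.rs`):

```rust
use std::collections::HashMap;

/// Mirrors the `BlockId` this patch adds in src/db.rs: a u32 counter
/// persisted as four big-endian bytes.
#[derive(Copy, Clone, Debug, PartialEq)]
struct BlockId(u32);

impl BlockId {
    fn start() -> Self { BlockId(0) }
    fn inc_assign(&mut self) { self.0 += 1 }
    fn to_bytes(self) -> [u8; 4] { self.0.to_be_bytes() }
    fn from_bytes(bytes: &[u8]) -> Self {
        let mut array = [0u8; 4];
        array.copy_from_slice(&bytes[..4]);
        BlockId(u32::from_be_bytes(array))
    }
}

fn main() {
    // Stand-in for TABLE_MAIN: record name -> raw value bytes.
    let mut main_table: HashMap<&str, Vec<u8>> = HashMap::new();

    // Read the persisted counter (or start fresh), advance it, write it back.
    let mut blockid = match main_table.get("blockid") {
        Some(raw) => BlockId::from_bytes(raw),
        None => BlockId::start(),
    };
    blockid.inc_assign();
    main_table.insert("blockid", blockid.to_bytes().to_vec());

    assert_eq!(BlockId::from_bytes(&main_table["blockid"]), BlockId(1));
}
```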
+ { + Some(rec) => { + // Parse bytes into BlockId using from_bytes method + let mut bid = BlockId::from_bytes(rec.value()); + bid.inc_assign(); + bid + } + None => BlockId::start(), + } + }; + let mut count = 0; let process = || -> Result<(), BlockProcError> { + // Get previous block hash for chain validation + let prev_hash = block.header.prev_block_hash; + + // Store block header let mut table = db .open_table(TABLE_BLKS) .map_err(BlockProcError::BlockTable)?; @@ -81,23 +107,141 @@ impl BlockProcessor { .insert(id.to_byte_array(), DbBlockHeader::from(block.header)) .map_err(BlockProcError::BlockStorage)?; + // Map block hash to block ID + let mut blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + blockids_table + .insert(id.to_byte_array(), blockid) + .map_err(|e| BlockProcError::Custom(format!("Block ID storage error: {}", e)))?; + + // Store block height information + // For simplicity, we use the block ID value as the height + let height = blockid.as_u32(); + + // TODO: need to think about whether to delete redundancy or distinguish id and height + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + heights_table + .insert(height, blockid) + .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; + + // Track UTXOs spent in this block + let mut block_spends = Vec::new(); + + // Process transactions in the block for tx in block.transactions { let txid = tx.txid(); txno.inc_assign(); - let mut table = db + // Store transaction ID to transaction number mapping + let mut txids_table = db .open_table(TABLE_TXIDS) .map_err(BlockProcError::TxidTable)?; - table + txids_table .insert(txid.to_byte_array(), txno) .map_err(BlockProcError::TxidStorage)?; - // TODO: Add remaining transaction information to other database tables + // Associate transaction with block ID + let mut tx_blocks_table = db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; + tx_blocks_table.insert(txno, blockid).map_err(|e| { + BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) + })?; + + // Process transaction inputs + for (vin_idx, input) in tx.inputs.iter().enumerate() { + if !input.prev_output.is_coinbase() { + let prev_txid = input.prev_output.txid; + let prev_vout = input.prev_output.vout; + + // Look up previous transaction number + if let Some(prev_txno) = txids_table + .get(prev_txid.to_byte_array()) + .map_err(BlockProcError::TxidLookup)? 
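Keying `TABLE_UTXOS` and `TABLE_INPUTS` by `(TxNo, vout)` instead of `(Txid, vout)` is what makes the txid-to-txno indirection above pay off: once `TABLE_TXIDS` resolves the 32-byte txid to the 5-byte transaction number, each outpoint reference shrinks from 36 to 9 bytes. A hedged sketch of that resolution step over std types (`u64` standing in for the 40-bit `TxNo`):

```rust
use std::collections::HashMap;

type Txid = [u8; 32];
type TxNo = u64; // the real type is a 40-bit counter; u64 is a stand-in

/// Resolve a previous output to the compact internal key used by the
/// UTXO and inputs tables. Returns `None` when the previous transaction
/// is unknown (e.g. blocks arriving out of order).
fn resolve_outpoint(
    txids: &HashMap<Txid, TxNo>,
    prev_txid: &Txid,
    prev_vout: u32,
) -> Option<(TxNo, u32)> {
    txids.get(prev_txid).map(|&txno| (txno, prev_vout))
}

fn main() {
    let mut txids = HashMap::new();
    txids.insert([0xAA; 32], 7u64);

    assert_eq!(resolve_outpoint(&txids, &[0xAA; 32], 1), Some((7, 1)));
    assert_eq!(resolve_outpoint(&txids, &[0xBB; 32], 0), None);
}
```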
+ .map(|v| v.value()) + { + // Mark UTXO as spent + let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { + BlockProcError::Custom(format!("UTXOs table error: {}", e)) + })?; + utxos_table + .remove(&(prev_txno, prev_vout.into_u32())) + .map_err(|e| { + BlockProcError::Custom(format!("UTXOs removal error: {}", e)) + })?; + + // Record UTXO spent in this block + block_spends.push((prev_txno, prev_vout.into_u32())); + + // Record input-output mapping + let mut inputs_table = db.open_table(TABLE_INPUTS).map_err(|e| { + BlockProcError::Custom(format!("Inputs table error: {}", e)) + })?; + inputs_table + .insert((txno, vin_idx as u32), (prev_txno, prev_vout.into_u32())) + .map_err(|e| { + BlockProcError::Custom(format!("Inputs storage error: {}", e)) + })?; + + // Update spending relationships + let mut outs_table = db.open_table(TABLE_OUTS).map_err(|e| { + BlockProcError::Custom(format!("Outs table error: {}", e)) + })?; + let mut spending_txs = outs_table + .get(prev_txno) + .map_err(|e| { + BlockProcError::Custom(format!("Outs lookup error: {}", e)) + })? + .map(|v| v.value().to_vec()) + .unwrap_or_default(); + spending_txs.push(txno); + outs_table.insert(prev_txno, spending_txs).map_err(|e| { + BlockProcError::Custom(format!("Outs update error: {}", e)) + })?; + } + } + } - let mut table = db + // Process transaction outputs + for (vout_idx, output) in tx.outputs.iter().enumerate() { + // Add new UTXO + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + utxos_table + .insert((txno, vout_idx as u32), ()) + .map_err(|e| { + BlockProcError::Custom(format!("UTXOs storage error: {}", e)) + })?; + + // Index script pubkey + let script = &output.script_pubkey; + if !script.is_empty() { + let mut spks_table = db.open_table(TABLE_SPKS).map_err(|e| { + BlockProcError::Custom(format!("SPKs table error: {}", e)) + })?; + let mut txnos = spks_table + .get(script.as_slice()) + .map_err(|e| { + BlockProcError::Custom(format!("SPKs lookup error: {}", e)) + })? + .map(|v| v.value().to_vec()) + .unwrap_or_default(); + txnos.push(txno); + spks_table.insert(script.as_slice(), txnos).map_err(|e| { + BlockProcError::Custom(format!("SPKs update error: {}", e)) + })?; + } + } + + // Store complete transaction + let mut txes_table = db .open_table(TABLE_TXES) .map_err(BlockProcError::TxesTable)?; - table + txes_table .insert(txno, DbTx::from(tx)) .map_err(BlockProcError::TxesStorage)?; @@ -109,14 +253,52 @@ impl BlockProcessor { count += 1; } + // Store UTXOs spent in this block + let mut block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + block_spends_table + .insert(blockid, block_spends) + .map_err(|e| { + BlockProcError::Custom(format!("Block spends storage error: {}", e)) + })?; + + // Update chain state + // Simplified approach - just append block to chain + let mut chain_table = db + .open_table(TABLE_CHAIN) + .map_err(|e| BlockProcError::Custom(format!("Chain table error: {}", e)))?; + + // Get current chain + let current_chain = chain_table + .get(REC_CHAIN) + .map_err(|e| BlockProcError::Custom(format!("Chain lookup error: {}", e)))? 
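Both the `TABLE_OUTS` and `TABLE_SPKS` updates in this hunk use the same read-modify-write shape: fetch the stored `Vec`, push one element, and write the whole vector back, since a value already written to the table cannot be appended to in place. The pattern in isolation, sketched over a std `HashMap` (redb's multimap tables could be an alternative worth evaluating for these one-to-many indexes):

```rust
use std::collections::HashMap;
use std::hash::Hash;

/// Append `item` to the list stored under `key`, creating the list on
/// first use — the same get / to_vec / push / insert sequence the block
/// processor uses for the spends and script indexes.
fn append_to_index<K: Eq + Hash + Clone, V>(index: &mut HashMap<K, Vec<V>>, key: &K, item: V) {
    // Against a real table this is a read followed by a full rewrite of
    // the value; the entry API collapses the two steps for this mock.
    index.entry(key.clone()).or_default().push(item);
}

fn main() {
    let mut spks: HashMap<Vec<u8>, Vec<u64>> = HashMap::new();
    let script = vec![0x51]; // OP_TRUE, as an example script
    append_to_index(&mut spks, &script, 1);
    append_to_index(&mut spks, &script, 2);
    assert_eq!(spks[&script], vec![1, 2]);
}
```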
+ .map(|v| v.value().to_vec()) + .unwrap_or_default(); + + // Append to main chain + let mut new_chain = current_chain; + new_chain.push(blockid); + chain_table + .insert(REC_CHAIN, new_chain) + .map_err(|e| BlockProcError::Custom(format!("Chain update error: {}", e)))?; + + // Update global counters let mut main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; + + // Update transaction counter main.insert(REC_TXNO, txno.to_byte_array().as_slice()) .map_err(BlockProcError::TxNoUpdate)?; + // Update block ID counter + main.insert(REC_BLOCKID, &blockid.to_bytes()) + .map_err(|e| BlockProcError::Custom(format!("Block ID update error: {}", e)))?; + Ok(()) }; + if let Err(e) = process() { if let Err(err) = db.abort() { log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); @@ -177,4 +359,13 @@ pub enum BlockProcError { /// Unable to write to transactions table: {0} TxesStorage(StorageError), + + /// Error looking up transaction ID: {0} + TxidLookup(StorageError), + + /// Unable to find block: {0} + BlockLookup(StorageError), + + /// Custom error: {0} + Custom(String), } diff --git a/src/db.rs b/src/db.rs index 698f790..1993386 100644 --- a/src/db.rs +++ b/src/db.rs @@ -25,7 +25,7 @@ use std::cmp::Ordering; use std::ops::ControlFlow; use std::path::Path; -use amplify::num::u40; +use amplify::num::{u24, u40}; use amplify::{ByteArray, FromSliceError}; use bpwallet::{BlockHeader, ConsensusDecode, ConsensusEncode, Tx}; use crossbeam_channel::{SendError, Sender}; use microservices::UService; use redb::{ @@ -39,12 +39,36 @@ use redb::{ #[display("#{0:010X}")] pub struct TxNo(u40); +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Display)] +#[display("#{0:08X}")] +pub struct BlockId(u32); + impl TxNo { pub fn start() -> Self { TxNo(u40::ONE) } pub fn inc_assign(&mut self) { self.0 += u40::ONE } } +impl BlockId { + pub fn start() -> Self { BlockId(0) } + + pub fn inc_assign(&mut self) { self.0 += 1 } + + // Method to access the u32 value + pub fn as_u32(&self) -> u32 { self.0 } + + // Method to get bytes representation + pub fn to_bytes(&self) -> [u8; 4] { self.0.to_be_bytes() } + + // Method to create BlockId from bytes + pub fn from_bytes(bytes: &[u8]) -> Self { + debug_assert_eq!(bytes.len(), 4); + let mut array = [0u8; 4]; + array.copy_from_slice(bytes); + BlockId(u32::from_be_bytes(array)) + } +} + impl ByteArray<5> for TxNo { fn from_byte_array(val: impl Into<[u8; 5]>) -> Self { Self(u40::from_be_bytes(val.into())) } @@ -134,14 +158,74 @@ impl redb::Value for DbTx { fn type_name() -> TypeName { TypeName::new("BpNodeTx") } } +impl redb::Key for BlockId { + fn compare(data1: &[u8], data2: &[u8]) -> Ordering { data1.cmp(data2) } +} + +impl redb::Value for BlockId { + type SelfType<'a> = Self; + + type AsBytes<'a> = [u8; 4]; + + fn fixed_width() -> Option<usize> { Some(4) } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where Self: 'a { + BlockId::from_bytes(data) + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where Self: 'b { + value.to_bytes() + } + + fn type_name() -> TypeName { TypeName::new("BpNodeBlockId") } +} + +pub const REC_TXNO: &str = "txno"; +pub const REC_BLOCKID: &str = "blockid"; +pub const REC_CHAIN: &str = "chain"; +pub const REC_ORPHANS: &str = "orphans"; + +// Main metadata table storing global counters and states pub const TABLE_MAIN: TableDefinition<&'static str, &[u8]> = TableDefinition::new("main"); + +// Maps block hash to block header pub const TABLE_BLKS: TableDefinition<[u8; 32], DbBlockHeader> =
TableDefinition::new("blocks"); + +// Maps transaction ID to internal transaction number pub const TABLE_TXIDS: TableDefinition<[u8; 32], TxNo> = TableDefinition::new("txids"); + +// Maps block hash to internal block ID +pub const TABLE_BLOCKIDS: TableDefinition<[u8; 32], BlockId> = TableDefinition::new("blockids"); + +// Stores complete transaction data pub const TABLE_TXES: TableDefinition<TxNo, DbTx> = TableDefinition::new("transactions"); + +// Maps transaction number to transaction numbers that spend its outputs pub const TABLE_OUTS: TableDefinition<TxNo, Vec<TxNo>> = TableDefinition::new("spends"); -pub const TABLE_SPKS: TableDefinition<&[u8], TxNo> = TableDefinition::new("scripts"); -pub const REC_TXNO: &str = "txno"; +// Maps script pubkey to a list of transaction numbers containing it +pub const TABLE_SPKS: TableDefinition<&[u8], Vec<TxNo>> = TableDefinition::new("scripts"); + +// Tracks unspent transaction outputs +pub const TABLE_UTXOS: TableDefinition<(TxNo, u32), ()> = TableDefinition::new("utxos"); + +// Maps block height to block ID +pub const TABLE_HEIGHTS: TableDefinition<u32, BlockId> = TableDefinition::new("block_heights"); + +// Maps transaction number to the block ID it belongs to +pub const TABLE_TX_BLOCKS: TableDefinition<TxNo, BlockId> = TableDefinition::new("tx_blocks"); + +// Maps transaction input to the output it spends +pub const TABLE_INPUTS: TableDefinition<(TxNo, u32), (TxNo, u32)> = TableDefinition::new("inputs"); + +// Tracks the active chain and orphaned blocks +pub const TABLE_CHAIN: TableDefinition<&'static str, Vec<BlockId>> = TableDefinition::new("chain"); + +// Records UTXOs spent in each block for reorg handling +pub const TABLE_BLOCK_SPENDS: TableDefinition<BlockId, Vec<(TxNo, u32)>> = + TableDefinition::new("block_spends"); pub struct IndexDb(Database); From 1bcda1c94d51855bbedb7cfb7ee299717ad128d1 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 11 Apr 2025 13:39:04 +0800 Subject: [PATCH 02/21] Enhance indexer with metadata for network, height, and block attributes Signed-off-by: will-bitlightlabs --- src/blocks.rs | 124 +++++++++++++++++++++++++++++++++++++++++++++++------- src/db.rs | 5 +- src/importer.rs | 16 ++++++- 3 files changed, 128 insertions(+), 17 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index f9d443a..0d0fbca 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -26,8 +26,8 @@ use std::collections::HashSet; use bprpc::BloomFilter32; -use amplify::{ByteArray, FromSliceError, hex}; -use bpwallet::{Block, BlockHash}; +use amplify::{ByteArray, Bytes32, FromSliceError, hex}; +use bpwallet::{Block, BlockHash, Network, Txid}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; use redb::{CommitError, ReadableTable, StorageError, TableError}; @@ -41,6 +41,21 @@ use crate::db::{ const NAME: &str = "blockproc"; +// Network information record in main table +pub const REC_NETWORK: &str = "network"; + +// Genesis block hashes for different networks +const GENESIS_HASH_MAINNET: &str = + "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"; +const GENESIS_HASH_TESTNET3: &str = + "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"; +const GENESIS_HASH_TESTNET4: &str = + "00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be350b0da8bf043"; +const GENESIS_HASH_SIGNET: &str = + "00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6"; +const GENESIS_HASH_REGTEST: &str = + "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"; + pub struct BlockProcessor { db: USender<DbMsg>, broker: Sender<ImporterMsg>, @@ -58,6 +73,43 @@ impl BlockProcessor { self.tracking.retain(|filter|
!filters.contains(filter)); } + // Helper function to determine network from block hash + fn detect_network_from_genesis(blockhash: &BlockHash) -> Option<Network> { + let hash_str = blockhash.to_string(); + match hash_str.as_str() { + GENESIS_HASH_MAINNET => Some(Network::Mainnet), + GENESIS_HASH_TESTNET3 => Some(Network::Testnet3), + GENESIS_HASH_TESTNET4 => Some(Network::Testnet4), + GENESIS_HASH_SIGNET => Some(Network::Signet), + GENESIS_HASH_REGTEST => Some(Network::Regtest), + _ => None, + } + } + + // Helper function to calculate block height + fn calculate_block_height( + &self, + block: &Block, + blockid: BlockId, + ) -> Result<u32, BlockProcError> { + // For genesis block, height is always 0 + // Check for all zeros hash which is the genesis block's prev_hash + let zero_hash = [0u8; 32]; + if block.header.prev_block_hash.to_byte_array() == zero_hash { + return Ok(0); + } + + // For simplicity in this implementation, we'll use block ID as fallback + // When proper reorg handling is implemented this should be revisited + // The proper height calculation would include blockchain state analysis + + // For now, if this is genesis block (blockid == 0), return 0 + // otherwise, simply use blockid as height which will be roughly equivalent + // This simplifies the logic while maintaining the distinction between concepts + + Ok(blockid.as_u32()) + } + pub fn process_block(&mut self, id: BlockHash, block: Block) -> Result<usize, BlockProcError> { let (tx, rx) = crossbeam_channel::bounded(1); self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; @@ -76,7 +128,7 @@ impl BlockProcessor { }; // Get or create the next block ID - let mut blockid = { + let blockid = { let main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; @@ -94,11 +146,27 @@ impl BlockProcessor { } }; + // Check for genesis block if this is block ID 0 + if blockid.as_u32() == 0 { + // For genesis block, detect and store network information + let network = Self::detect_network_from_genesis(&id) + .ok_or_else(|| BlockProcError::Custom("Unknown genesis block hash".to_string()))?; + + let mut main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + + // Store network information + main.insert(REC_NETWORK, network.to_string().as_bytes()) + .map_err(|e| { + BlockProcError::Custom(format!("Failed to store network info: {}", e)) + })?; + + log::info!(target: NAME, "Initialized with genesis block for network: {}", network); + } + let mut count = 0; let process = || -> Result<(), BlockProcError> { - // Get previous block hash for chain validation - let prev_hash = block.header.prev_block_hash; - // Store block header let mut table = db .open_table(TABLE_BLKS) .map_err(BlockProcError::BlockTable)?; @@ -115,14 +183,44 @@ impl BlockProcessor { .insert(id.to_byte_array(), blockid) .map_err(|e| BlockProcError::Custom(format!("Block ID storage error: {}", e)))?; - // Store block height information - // For simplicity, we use the block ID value as the height - let height = blockid.as_u32(); + // Calculate the block height based on previous block instead of using blockid + // This is crucial for maintaining correct block heights during chain reorganizations + let height = self.calculate_block_height(&block, blockid)?; - // TODO: need to think about whether to delete redundancy or distinguish id and height + log::debug!( + target: NAME, + "Processing block {} at height {} with internal ID {}", + id, + height, + blockid + ); + + // Store block height information let mut heights_table = db .open_table(TABLE_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + // Check if we already
have a block at this height + if let Some(existing_blockid) = heights_table + .get(height) + .map_err(|e| BlockProcError::Custom(format!("Heights lookup error: {}", e)))? + .map(|v| v.value()) + { + // If different block at this height, we have a potential reorg + if existing_blockid != blockid { + log::warn!( + target: NAME, + "Detected potential chain reorganization at height {}: replacing block ID {} with {}", + height, + existing_blockid, + blockid + ); + + // TODO: Implement full reorg handling + // For now, we'll just overwrite the existing entry + } + } + heights_table .insert(height, blockid) .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; @@ -245,8 +343,8 @@ impl BlockProcessor { .insert(txno, DbTx::from(tx)) .map_err(BlockProcError::TxesStorage)?; - // TODO: If txid match `tracking` Bloom filters, send information to the broker - if false { + // Check if transaction ID is in tracking list and notify if needed + if self.tracking.contains(&txid) { self.broker.send(ImporterMsg::Mined(txid))?; } @@ -293,7 +391,7 @@ impl BlockProcessor { .map_err(BlockProcError::TxNoUpdate)?; // Update block ID counter - main.insert(REC_BLOCKID, &blockid.to_bytes()) + main.insert(REC_BLOCKID, &blockid.to_bytes().as_slice()) .map_err(|e| BlockProcError::Custom(format!("Block ID update error: {}", e)))?; Ok(()) diff --git a/src/db.rs b/src/db.rs index 1993386..429365e 100644 --- a/src/db.rs +++ b/src/db.rs @@ -25,7 +25,7 @@ use std::cmp::Ordering; use std::ops::ControlFlow; use std::path::Path; -use amplify::num::{u24, u40}; +use amplify::num::u40; use amplify::{ByteArray, FromSliceError}; use bpwallet::{BlockHeader, ConsensusDecode, ConsensusEncode, Tx}; use crossbeam_channel::{SendError, Sender}; @@ -44,12 +44,13 @@ pub struct TxNo(u40); pub struct BlockId(u32); impl TxNo { - pub fn start() -> Self { TxNo(u40::ONE) } + pub fn start() -> Self { TxNo(u40::ZERO) } pub fn inc_assign(&mut self) { self.0 += u40::ONE } } impl BlockId { + // 0 corresponds to the genesis block, and the height is aligned with other indexers pub fn start() -> Self { BlockId(0) } pub fn inc_assign(&mut self) { self.0 += 1 } diff --git a/src/importer.rs b/src/importer.rs index 8416c78..4ca803a 100644 --- a/src/importer.rs +++ b/src/importer.rs @@ -126,10 +126,22 @@ impl ServiceController for BlockI fn on_command(&mut self, cmd: ImporterCmd) { match cmd { ImporterCmd::TrackTxid(filters) => { - self.processor.track(filters); + self.processor.track( + filters + .into_iter() + .map(|a| a.to_byte_array()) + .map(Txid::from) + .collect(), + ); } ImporterCmd::Untrack(filters) => { - self.processor.untrack(filters); + self.processor.untrack( + filters + .into_iter() + .map(|a| a.to_byte_array()) + .map(Txid::from) + .collect(), + ); } } } From 8648d48ac20c723101269c463d622d149b77d84b Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 11 Apr 2025 21:05:13 +0800 Subject: [PATCH 03/21] fix: improve transaction tracking with bloom filters Signed-off-by: will-bitlightlabs --- src/blocks.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 0d0fbca..28a230d 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -25,8 +25,8 @@ use std::collections::HashSet; -use bprpc::BloomFilter32; use amplify::{ByteArray, Bytes32, FromSliceError, hex}; +use bprpc::BloomFilter32; use bpwallet::{Block, BlockHash, Network, Txid}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; @@ -344,7 +344,15 @@ impl BlockProcessor { 
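The reorg branch above only logs and overwrites the height entry; real recovery needs the common ancestor of the two competing branches before anything can be rolled back. One possible shape for that search, sketched over an in-memory block→parent map (an assumed stand-in for walking the stored block headers). Collecting one branch into a set keeps the walk linear even when the two branches have very different lengths:

```rust
use std::collections::{HashMap, HashSet};

type Hash32 = [u8; 32];

/// Find the first block present on both branches, i.e. the point a
/// rollback would have to rewind to. `parents` maps block -> parent;
/// the genesis block has no entry.
fn common_ancestor(
    parents: &HashMap<Hash32, Hash32>,
    tip_a: Hash32,
    tip_b: Hash32,
) -> Option<Hash32> {
    // Collect all ancestors of branch A (tip included)...
    let mut branch_a = HashSet::new();
    let mut cur = Some(tip_a);
    while let Some(h) = cur {
        branch_a.insert(h);
        cur = parents.get(&h).copied();
    }
    // ...then walk branch B until it hits one of them.
    let mut cur = Some(tip_b);
    while let Some(h) = cur {
        if branch_a.contains(&h) {
            return Some(h);
        }
        cur = parents.get(&h).copied();
    }
    None
}

fn main() {
    // genesis <- anc <- a_tip  and  genesis <- anc <- b1 <- b_tip
    let mut parents = HashMap::new();
    parents.insert([2; 32], [1; 32]); // a_tip -> anc
    parents.insert([3; 32], [1; 32]); // b1 -> anc
    parents.insert([4; 32], [3; 32]); // b_tip -> b1
    parents.insert([1; 32], [0; 32]); // anc -> genesis
    assert_eq!(common_ancestor(&parents, [2; 32], [4; 32]), Some([1; 32]));
}
```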
.map_err(BlockProcError::TxesStorage)?; // Check if transaction ID is in tracking list and notify if needed - if self.tracking.contains(&txid) { + let txid_bytes = txid.to_byte_array(); + let mut should_notify = false; + for filter in &self.tracking { + if filter.contains(&txid_bytes) { + should_notify = true; + break; + } + } + if should_notify { self.broker.send(ImporterMsg::Mined(txid))?; } From 92ff4bce327c91ec347043d256470f5e218c410e Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Sat, 12 Apr 2025 00:34:24 +0800 Subject: [PATCH 04/21] refactor: adopt single-chain design with explicit network configuration at initialization Signed-off-by: will-bitlightlabs --- src/bin/bpd.rs | 102 ++++++++++++++++++++++++++++++++++++++++++++++-- src/blocks.rs | 96 ++++++++++++--------------------------------- src/config.rs | 5 +++ src/db.rs | 7 ++-- src/importer.rs | 16 +------- src/lib.rs | 8 +++- 6 files changed, 140 insertions(+), 94 deletions(-) diff --git a/src/bin/bpd.rs b/src/bin/bpd.rs index fc23d93..bd7268d 100644 --- a/src/bin/bpd.rs +++ b/src/bin/bpd.rs @@ -79,15 +79,109 @@ fn main() -> Status { ); exit(3); } - if let Err(err) = Database::create(&index_path) { - eprintln!("unable to create index database.\n{err}"); - exit(4); + + // Create the database + let db = match Database::create(&index_path) { + Ok(db) => db, + Err(err) => { + eprintln!("unable to create index database.\n{err}"); + exit(4); + } + }; + + // Initialize database with network information + let network = opts.general.network; + match db.begin_write() { + Ok(tx) => { + match tx.open_table(bpnode::db::TABLE_MAIN) { + Ok(mut main_table) => { + if let Err(err) = main_table + .insert(bpnode::REC_NETWORK, network.to_string().as_bytes()) + { + eprintln!("Failed to write network information to database: {err}"); + exit(5); + } + } + Err(err) => { + eprintln!("Failed to open main table in database: {err}"); + exit(6); + } + } + + if let Err(err) = tx.commit() { + eprintln!("Failed to commit initial database transaction: {err}"); + exit(7); + } + } + Err(err) => { + eprintln!("Failed to begin database transaction: {err}"); + exit(8); + } } - eprintln!("index database initialized, exiting"); + + eprintln!("index database initialized for {} network, exiting", network); Status(Ok(())) } None => { let conf = Config::from(opts); + let index_path = conf.data_dir.join(PATH_INDEXDB); + + // Check if the database exists + if let Ok(true) = fs::exists(&index_path) { + // Open the database to check network configuration + match Database::open(&index_path) { + Ok(db) => { + // Check stored network matches configured network + if let Ok(tx) = db.begin_read() { + if let Ok(main_table) = tx.open_table(bpnode::db::TABLE_MAIN) { + if let Ok(Some(network_rec)) = main_table.get(bpnode::REC_NETWORK) { + let stored_network = + String::from_utf8_lossy(network_rec.value()); + if stored_network != conf.network.to_string() { + eprintln!("ERROR: Database network mismatch!"); + eprintln!("Configured network: {}", conf.network); + eprintln!("Database network: {}", stored_network); + eprintln!( + "Each BP-Node instance works with a single chain." + ); + eprintln!( + "To use a different network, create a separate \ + instance with a different data directory." + ); + exit(9); + } + log::info!( + "Database network matches configured network: {}", + stored_network + ); + } else { + // Network information not found in the database + eprintln!( + "ERROR: Database exists but doesn't contain network \ + information." 
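The startup path here enforces one-network-per-instance by comparing the `network` record in `TABLE_MAIN` against the configured network before the broker starts. The decision logic reduces to a small pure function, sketched with std types (the stored value is the UTF-8 network name, as written by `bpd init`):

```rust
/// Outcome of comparing the database's stored network with the
/// configured one at startup.
#[derive(Debug, PartialEq)]
enum NetworkCheck {
    Match,
    Mismatch { stored: String, configured: String },
    Uninitialized, // record missing: reinitialize with `bpd init`
}

fn check_network(stored: Option<&[u8]>, configured: &str) -> NetworkCheck {
    match stored {
        None => NetworkCheck::Uninitialized,
        Some(raw) => {
            let stored = String::from_utf8_lossy(raw);
            if stored == configured {
                NetworkCheck::Match
            } else {
                NetworkCheck::Mismatch {
                    stored: stored.into_owned(),
                    configured: configured.to_owned(),
                }
            }
        }
    }
}

fn main() {
    assert_eq!(check_network(Some(b"testnet3"), "testnet3"), NetworkCheck::Match);
    assert_eq!(check_network(None, "mainnet"), NetworkCheck::Uninitialized);
    assert!(matches!(
        check_network(Some(b"mainnet"), "regtest"),
        NetworkCheck::Mismatch { .. }
    ));
}
```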
+ ); + eprintln!( + "Please reinitialize the database with 'bpd init' command." + ); + exit(10); + } + } + } + } + Err(err) => { + eprintln!( + "Warning: Could not open database to check network configuration: {}", + err + ); + } + } + } else { + eprintln!( + "ERROR: Database not found! Please initialize with 'bpd init' command first." + ); + exit(11); + } + Status(Broker::start(conf).and_then(|runtime| runtime.run())) } } diff --git a/src/blocks.rs b/src/blocks.rs index 28a230d..329eead 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -25,9 +25,9 @@ use std::collections::HashSet; -use amplify::{ByteArray, Bytes32, FromSliceError, hex}; +use amplify::{ByteArray, FromSliceError}; use bprpc::BloomFilter32; -use bpwallet::{Block, BlockHash, Network, Txid}; +use bpwallet::{Block, BlockHash}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; use redb::{CommitError, ReadableTable, StorageError, TableError}; @@ -35,8 +35,8 @@ use redb::{CommitError, ReadableTable, StorageError, TableError}; use crate::ImporterMsg; use crate::db::{ BlockId, DbBlockHeader, DbMsg, DbTx, REC_BLOCKID, REC_CHAIN, REC_ORPHANS, REC_TXNO, TABLE_BLKS, - TABLE_BLOCK_SPENDS, TABLE_BLOCKIDS, TABLE_CHAIN, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, - TABLE_OUTS, TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, + TABLE_BLOCK_SPENDS, TABLE_BLOCKIDS, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, TABLE_OUTS, + TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, }; const NAME: &str = "blockproc"; @@ -44,18 +44,6 @@ const NAME: &str = "blockproc"; // Network information record in main table pub const REC_NETWORK: &str = "network"; -// Genesis block hashes for different networks -const GENESIS_HASH_MAINNET: &str = - "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"; -const GENESIS_HASH_TESTNET3: &str = - "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"; -const GENESIS_HASH_TESTNET4: &str = - "00000000da84f2bafbbc53dee25a72ae507ff4914b867c565be350b0da8bf043"; -const GENESIS_HASH_SIGNET: &str = - "00000008819873e925422c1ff0f99f7cc9bbb232af63a077a480a3633bee1ef6"; -const GENESIS_HASH_REGTEST: &str = - "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"; - pub struct BlockProcessor { db: USender, broker: Sender, @@ -73,19 +61,6 @@ impl BlockProcessor { self.tracking.retain(|filter| !filters.contains(filter)); } - // Helper function to determine network from block hash - fn detect_network_from_genesis(blockhash: &BlockHash) -> Option { - let hash_str = blockhash.to_string(); - match hash_str.as_str() { - GENESIS_HASH_MAINNET => Some(Network::Mainnet), - GENESIS_HASH_TESTNET3 => Some(Network::Testnet3), - GENESIS_HASH_TESTNET4 => Some(Network::Testnet4), - GENESIS_HASH_SIGNET => Some(Network::Signet), - GENESIS_HASH_REGTEST => Some(Network::Regtest), - _ => None, - } - } - // Helper function to calculate block height fn calculate_block_height( &self, @@ -99,9 +74,10 @@ impl BlockProcessor { return Ok(0); } - // For simplicity in this implementation, we'll use block ID as fallback - // When proper reorg handling is implemented this should be revisited - // The proper height calculation would include blockchain state analysis + // Since each BP-Node instance works with a single chain, + // for simplicity we use block ID as a height fallback. + // In a multi-chain system, we would need more sophisticated height calculation. + // When proper reorg handling is implemented this should be revisited. 
// For now, if this is genesis block (blockid == 0), return 0 // otherwise, simply use blockid as height which will be roughly equivalent @@ -146,25 +122,6 @@ impl BlockProcessor { } }; - // Check for genesis block if this is block ID 0 - if blockid.as_u32() == 0 { - // For genesis block, detect and store network information - let network = Self::detect_network_from_genesis(&id) - .ok_or_else(|| BlockProcError::Custom("Unknown genesis block hash".to_string()))?; - - let mut main = db - .open_table(TABLE_MAIN) - .map_err(BlockProcError::MainTable)?; - - // Store network information - main.insert(REC_NETWORK, network.to_string().as_bytes()) - .map_err(|e| { - BlockProcError::Custom(format!("Failed to store network info: {}", e)) - })?; - - log::info!(target: NAME, "Initialized with genesis block for network: {}", network); - } - let mut count = 0; let process = || -> Result<(), BlockProcError> { // Store block header @@ -217,6 +174,12 @@ impl BlockProcessor { ); // TODO: Implement full reorg handling + // In a single-chain BP-Node instance, reorgs are detected when a different + // block is encountered at the same height. The proper handling would include: + // 1. Finding the common ancestor block + // 2. Rolling back transactions in the old chain branch + // 3. Applying transactions from the new chain branch + // 4. Updating UTXO set accordingly // For now, we'll just overwrite the existing entry } } @@ -347,7 +310,7 @@ impl BlockProcessor { let txid_bytes = txid.to_byte_array(); let mut should_notify = false; for filter in &self.tracking { - if filter.contains(&txid_bytes) { + if filter.contains(txid_bytes) { should_notify = true; break; } @@ -369,26 +332,6 @@ impl BlockProcessor { BlockProcError::Custom(format!("Block spends storage error: {}", e)) })?; - // Update chain state - // Simplified approach - just append block to chain - let mut chain_table = db - .open_table(TABLE_CHAIN) - .map_err(|e| BlockProcError::Custom(format!("Chain table error: {}", e)))?; - - // Get current chain - let current_chain = chain_table - .get(REC_CHAIN) - .map_err(|e| BlockProcError::Custom(format!("Chain lookup error: {}", e)))? - .map(|v| v.value().to_vec()) - .unwrap_or_default(); - - // Append to main chain - let mut new_chain = current_chain; - new_chain.push(blockid); - chain_table - .insert(REC_CHAIN, new_chain) - .map_err(|e| BlockProcError::Custom(format!("Chain update error: {}", e)))?; - // Update global counters let mut main = db .open_table(TABLE_MAIN) @@ -402,6 +345,15 @@ impl BlockProcessor { main.insert(REC_BLOCKID, &blockid.to_bytes().as_slice()) .map_err(|e| BlockProcError::Custom(format!("Block ID update error: {}", e)))?; + // Log successful block processing + log::debug!( + target: NAME, + "Successfully processed block {} at height {} with {} transactions", + id, + height, + count + ); + Ok(()) }; diff --git a/src/config.rs b/src/config.rs index 17a0cb7..755bea2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -35,9 +35,14 @@ pub struct Config { /// Data location pub data_dir: PathBuf, + /// Bitcoin network type (mainnet, testnet, etc.) + /// Each BP-Node instance is designed to work with a single network type. + /// To work with multiple networks, create separate instances with different data directories. 
pub network: Network, + /// Addresses to listen for RPC connections pub rpc: Vec, + /// Addresses to listen for block import connections pub import: Vec, } diff --git a/src/db.rs b/src/db.rs index 429365e..7e7a9f4 100644 --- a/src/db.rs +++ b/src/db.rs @@ -221,13 +221,14 @@ pub const TABLE_TX_BLOCKS: TableDefinition = TableDefinition::new // Maps transaction input to the output it spends pub const TABLE_INPUTS: TableDefinition<(TxNo, u32), (TxNo, u32)> = TableDefinition::new("inputs"); -// Tracks the active chain and orphaned blocks -pub const TABLE_CHAIN: TableDefinition<&'static str, Vec> = TableDefinition::new("chain"); - // Records UTXOs spent in each block for reorg handling pub const TABLE_BLOCK_SPENDS: TableDefinition> = TableDefinition::new("block_spends"); +// Each BP-Node instance is designed to work with a single blockchain network. +// If multiple networks need to be indexed, separate instances should be used +// with different data directories. The network information is stored in the +// MAIN table under the REC_NETWORK key. pub struct IndexDb(Database); impl IndexDb { diff --git a/src/importer.rs b/src/importer.rs index 4ca803a..8416c78 100644 --- a/src/importer.rs +++ b/src/importer.rs @@ -126,22 +126,10 @@ impl ServiceController for BlockI fn on_command(&mut self, cmd: ImporterCmd) { match cmd { ImporterCmd::TrackTxid(filters) => { - self.processor.track( - filters - .into_iter() - .map(|a| a.to_byte_array()) - .map(Txid::from) - .collect(), - ); + self.processor.track(filters); } ImporterCmd::Untrack(filters) => { - self.processor.untrack( - filters - .into_iter() - .map(|a| a.to_byte_array()) - .map(Txid::from) - .collect(), - ); + self.processor.untrack(filters); } } } diff --git a/src/lib.rs b/src/lib.rs index 27df240..2a17651 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! BP-Node: Bitcoin blockchain indexer +//! +//! Each BP-Node instance is designed to work with a single blockchain network. +//! If multiple networks need to be indexed (mainnet, testnet, etc.), separate +//! instances should be used with different data directories. + #[macro_use] extern crate amplify; @@ -32,7 +38,7 @@ mod blocks; pub mod db; mod importer; -pub use blocks::{BlockProcError, BlockProcessor}; +pub use blocks::{BlockProcError, BlockProcessor, REC_NETWORK}; pub use broker::{Broker, BrokerError, BrokerRpcMsg, PATH_INDEXDB, TrackReq}; pub use config::Config; pub use importer::{BlockImporter, ImporterCmd, ImporterMsg}; From 709c569bea730042f731f668d24693a95cea834f Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Mon, 14 Apr 2025 16:30:52 +0800 Subject: [PATCH 05/21] feat(WIP): implement orphan block handling Signed-off-by: will-bitlightlabs --- src/blocks.rs | 574 +++++++++++++++++++++++++++++++++++++++++++++++--- src/db.rs | 43 +++- 2 files changed, 585 insertions(+), 32 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 329eead..a4c835e 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -24,19 +24,21 @@ //! Block importer interface organized into a reactor thread. 
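The orphan handling introduced below rests on two persistent maps: `TABLE_ORPHANS` (block hash → serialized block plus arrival time) and `TABLE_ORPHAN_PARENTS` (parent hash → hashes of the orphans waiting on it), so that when a parent finally arrives its dependents can be found in one lookup. An in-memory sketch of that two-map shape, using std types only (the real pool also persists across restarts):

```rust
use std::collections::HashMap;

type Hash32 = [u8; 32];

#[derive(Default)]
struct OrphanPool {
    /// block hash -> (serialized block, arrival unix time)
    orphans: HashMap<Hash32, (Vec<u8>, u64)>,
    /// parent hash -> hashes of orphans waiting on it
    by_parent: HashMap<Hash32, Vec<Hash32>>,
}

impl OrphanPool {
    fn park(&mut self, hash: Hash32, parent: Hash32, raw: Vec<u8>, now: u64) {
        self.orphans.insert(hash, (raw, now));
        self.by_parent.entry(parent).or_default().push(hash);
    }

    /// Called after `parent` is indexed: drain everything waiting on it.
    fn take_children(&mut self, parent: &Hash32) -> Vec<(Hash32, Vec<u8>)> {
        self.by_parent
            .remove(parent)
            .unwrap_or_default()
            .into_iter()
            .filter_map(|h| self.orphans.remove(&h).map(|(raw, _)| (h, raw)))
            .collect()
    }
}

fn main() {
    let mut pool = OrphanPool::default();
    pool.park([2; 32], [1; 32], vec![0xDE, 0xAD], 1_700_000_000);
    // Once block [1; 32] is processed, its waiting children become available:
    let ready = pool.take_children(&[1; 32]);
    assert_eq!(ready.len(), 1);
}
```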
use std::collections::HashSet; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use amplify::{ByteArray, FromSliceError}; use bprpc::BloomFilter32; -use bpwallet::{Block, BlockHash}; +use bpwallet::{Block, BlockHash, ConsensusDecode, ConsensusEncode}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; -use redb::{CommitError, ReadableTable, StorageError, TableError}; +use redb::{CommitError, ReadableTable, ReadableTableMetadata, StorageError, TableError}; use crate::ImporterMsg; use crate::db::{ - BlockId, DbBlockHeader, DbMsg, DbTx, REC_BLOCKID, REC_CHAIN, REC_ORPHANS, REC_TXNO, TABLE_BLKS, - TABLE_BLOCK_SPENDS, TABLE_BLOCKIDS, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, TABLE_OUTS, - TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, + BlockId, DbBlock, DbBlockHeader, DbMsg, DbTx, REC_BLOCKID, REC_TXNO, TABLE_BLKS, + TABLE_BLOCK_HEIGHTS, TABLE_BLOCK_SPENDS, TABLE_BLOCK_TXS, TABLE_BLOCKIDS, TABLE_HEIGHTS, + TABLE_INPUTS, TABLE_MAIN, TABLE_ORPHAN_PARENTS, TABLE_ORPHANS, TABLE_OUTS, TABLE_SPKS, + TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, }; const NAME: &str = "blockproc"; @@ -44,6 +46,11 @@ const NAME: &str = "blockproc"; // Network information record in main table pub const REC_NETWORK: &str = "network"; +// Constants for orphan block management +const MAX_ORPHAN_BLOCKS: usize = 100; +// Orphan blocks expire after 24 hours +const ORPHAN_EXPIRY_HOURS: u64 = 24; + pub struct BlockProcessor { db: USender, broker: Sender, @@ -61,12 +68,8 @@ impl BlockProcessor { self.tracking.retain(|filter| !filters.contains(filter)); } - // Helper function to calculate block height - fn calculate_block_height( - &self, - block: &Block, - blockid: BlockId, - ) -> Result { + // Helper function to calculate block height based on previous block hash + fn calculate_block_height(&self, block: &Block) -> Result { // For genesis block, height is always 0 // Check for all zeros hash which is the genesis block's prev_hash let zero_hash = [0u8; 32]; @@ -74,19 +77,83 @@ impl BlockProcessor { return Ok(0); } - // Since each BP-Node instance works with a single chain, - // for simplicity we use block ID as a height fallback. - // In a multi-chain system, we would need more sophisticated height calculation. - // When proper reorg handling is implemented this should be revisited. 
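The rewritten height calculation below resolves the parent in up to three steps: block hash → `BlockId` via `TABLE_BLOCKIDS`, `BlockId` → height via the new `TABLE_BLOCK_HEIGHTS` reverse mapping, and only as a consistency fallback a linear scan of `TABLE_HEIGHTS` — which costs O(chain length), the reason the reverse table exists. The same three-step resolution condensed over std maps (`u32` standing in for `BlockId`):

```rust
use std::collections::HashMap;

type Hash32 = [u8; 32];

fn parent_height(
    blockids: &HashMap<Hash32, u32>,   // block hash -> BlockId
    block_heights: &HashMap<u32, u32>, // BlockId -> height (fast path)
    heights: &HashMap<u32, u32>,       // height -> BlockId (fallback scan)
    parent_hash: &Hash32,
) -> Result<u32, String> {
    // Step 1: unknown parent means the incoming block is an orphan.
    let parent_id = *blockids.get(parent_hash).ok_or("orphan: parent not indexed")?;
    // Step 2: direct reverse mapping.
    if let Some(&h) = block_heights.get(&parent_id) {
        return Ok(h + 1);
    }
    // Step 3: linear scan of the forward mapping — consistency fallback only.
    heights
        .iter()
        .find(|(_, &id)| id == parent_id)
        .map(|(&h, _)| h + 1)
        .ok_or_else(|| "database inconsistency: parent id has no height".into())
}

fn main() {
    let blockids = HashMap::from([([7u8; 32], 41u32)]);
    let block_heights = HashMap::from([(41u32, 99u32)]);
    let heights = HashMap::new();
    assert_eq!(parent_height(&blockids, &block_heights, &heights, &[7; 32]), Ok(100));
}
```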
+ // Find block height of the previous block and add 1 + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(tx))?; + let db = rx.recv()?; + + // Lookup the block ID for the previous block hash + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let prev_blockid = blockids_table + .get(block.header.prev_block_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))?; + + // If previous block not found, it's an orphan block + if prev_blockid.is_none() { + log::debug!( + target: NAME, + "Orphan block detected: parent block {} not found", + block.header.prev_block_hash + ); + return Err(BlockProcError::OrphanBlock(block.header.prev_block_hash)); + } + + let prev_blockid_record = prev_blockid.unwrap(); + // Get the previous block's ID + let prev_blockid = prev_blockid_record.value(); + + // First check the BlockId to height mapping table which is more efficient + let block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + + if let Some(prev_height_record) = block_heights_table + .get(prev_blockid) + .map_err(|e| BlockProcError::Custom(format!("Block height lookup error: {}", e)))? + { + let prev_height = prev_height_record.value(); + return Ok(prev_height + 1); + } + + // If not found in the direct mapping table, check the height -> blockid table + let heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - // For now, if this is genesis block (blockid == 0), return 0 - // otherwise, simply use blockid as height which will be roughly equivalent - // This simplifies the logic while maintaining the distinction between concepts + // Scan the heights table to find the previous block ID + let heights_iter = heights_table + .iter() + .map_err(|e| BlockProcError::Custom(format!("Heights table iterator error: {}", e)))?; + + for height_entry in heights_iter { + let (height, block_id) = height_entry + .map_err(|e| BlockProcError::Custom(format!("Heights entry error: {}", e)))?; + + if block_id.value() == prev_blockid { + // Previous block's height + 1 is the current block's height + return Ok(height.value() + 1); + } + } - Ok(blockid.as_u32()) + // If we couldn't find the previous block in either height table, + // this is an error condition as the database is in an inconsistent state + Err(BlockProcError::Custom(format!( + "Database inconsistency: Previous block with ID {} found in blockids table but not in \ + any height table", + prev_blockid + ))) } pub fn process_block(&mut self, id: BlockHash, block: Block) -> Result { + // Store a copy of the parent hash for potential orphan block handling + let parent_hash = block.header.prev_block_hash; + // Clone the block for potential orphan processing + let block_clone = block.clone(); + + // Regular block processing starts here let (tx, rx) = crossbeam_channel::bounded(1); self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; @@ -104,7 +171,7 @@ impl BlockProcessor { }; // Get or create the next block ID - let blockid = { + let mut blockid = { let main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; @@ -114,9 +181,7 @@ impl BlockProcessor { { Some(rec) => { // Parse bytes into BlockId using from_bytes method - let mut bid = BlockId::from_bytes(rec.value()); - bid.inc_assign(); - bid + BlockId::from_bytes(rec.value()) } 
None => BlockId::start(), } @@ -124,12 +189,25 @@ impl BlockProcessor { let mut count = 0; let process = || -> Result<(), BlockProcError> { + // Calculate the block height based on previous block + // This function will also detect orphan blocks + let height = match self.calculate_block_height(&block) { + Ok(h) => h, + Err(BlockProcError::OrphanBlock(_)) => { + // If we detect an orphan block, abort this transaction and save the orphan + return Err(BlockProcError::OrphanBlock(parent_hash)); + } + Err(e) => return Err(e), + }; + + blockid.inc_assign(); + // Store block header let mut table = db .open_table(TABLE_BLKS) .map_err(BlockProcError::BlockTable)?; table - .insert(id.to_byte_array(), DbBlockHeader::from(block.header)) + .insert(blockid, DbBlockHeader::from(block.header)) .map_err(BlockProcError::BlockStorage)?; // Map block hash to block ID @@ -140,10 +218,6 @@ impl BlockProcessor { .insert(id.to_byte_array(), blockid) .map_err(|e| BlockProcError::Custom(format!("Block ID storage error: {}", e)))?; - // Calculate the block height based on previous block instead of using blockid - // This is crucial for maintaining correct block heights during chain reorganizations - let height = self.calculate_block_height(&block, blockid)?; - log::debug!( target: NAME, "Processing block {} at height {} with internal ID {}", @@ -180,7 +254,14 @@ impl BlockProcessor { // 2. Rolling back transactions in the old chain branch // 3. Applying transactions from the new chain branch // 4. Updating UTXO set accordingly + + // When implementing reorg, make sure to update both height tables: + // - TABLE_HEIGHTS: height -> blockid mapping + // - TABLE_BLOCK_HEIGHTS: blockid -> height mapping + // For now, we'll just overwrite the existing entry + // This simple approach doesn't handle the full reorg properly + // but ensures the database doesn't get into an inconsistent state } } @@ -188,14 +269,28 @@ impl BlockProcessor { .insert(height, blockid) .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; + // Also update the reverse mapping (blockid -> height) + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + block_heights_table.insert(blockid, height).map_err(|e| { + BlockProcError::Custom(format!("Block height storage error: {}", e)) + })?; + // Track UTXOs spent in this block let mut block_spends = Vec::new(); + // Track all transactions in this block + let mut block_txs = Vec::new(); + // Process transactions in the block for tx in block.transactions { let txid = tx.txid(); txno.inc_assign(); + // Add transaction to the list for this block + block_txs.push(txno); + // Store transaction ID to transaction number mapping let mut txids_table = db .open_table(TABLE_TXIDS) @@ -322,6 +417,14 @@ impl BlockProcessor { count += 1; } + // Store all transaction numbers in this block + let mut block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + block_txs_table + .insert(blockid, block_txs) + .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; + // Store UTXOs spent in this block let mut block_spends_table = db .open_table(TABLE_BLOCK_SPENDS) @@ -357,15 +460,423 @@ impl BlockProcessor { Ok(()) }; + match process() { + Err(BlockProcError::OrphanBlock(_)) => { + // Handle orphan block case + if let Err(err) = db.abort() { + log::warn!(target: NAME, "Unable to abort failed database 
transaction due to {err}"); + }; + + // Save the orphan block for later processing + log::info!( + target: NAME, + "Orphan block detected: Parent block {} not found for block {}", + parent_hash, + id + ); + + return self.save_orphan_block(id, block_clone); + } + Err(e) => { + // Handle other errors + if let Err(err) = db.abort() { + log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); + }; + return Err(e); + } + Ok(()) => { + // Successful processing + db.commit()?; + + // After successful processing, check if we have any orphans that depend on this + // block + self.process_orphans(id)?; + + // Final log message + log::debug!( + target: NAME, + "Successfully processed block {} with {} transactions", + id, + count + ); + + Ok(count) + } + } + } + + // Save an orphan block for later processing + fn save_orphan_block(&self, id: BlockHash, block: Block) -> Result { + log::info!( + target: NAME, + "Saving orphan block {} with parent {} for later processing", + id, + block.header.prev_block_hash + ); + + // First, check if we should clean up old orphans + self.clean_expired_orphans()?; + + // Then check if we have too many orphans + if self.count_orphans()? >= MAX_ORPHAN_BLOCKS { + log::warn!( + target: NAME, + "Orphan block limit reached ({}). Rejecting new orphan block {}", + MAX_ORPHAN_BLOCKS, + id + ); + // Simply ignore this orphan block + return Ok(0); + } + + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(tx))?; + let db = rx.recv()?; + + let process = || -> Result<(), BlockProcError> { + // Get the current timestamp for expiry tracking + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(); + + let parent_hash = block.header.prev_block_hash.to_byte_array(); + + // Store the orphan block + let mut orphans_table = db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + orphans_table + .insert(id.to_byte_array(), (DbBlock::from(block), now)) + .map_err(|e| BlockProcError::Custom(format!("Orphan storage error: {}", e)))?; + + // Index by parent hash to allow quick lookup when parent is processed + let mut orphan_parents_table = db.open_table(TABLE_ORPHAN_PARENTS).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents table error: {}", e)) + })?; + + // Get existing orphans with the same parent, if any + let mut orphan_list = orphan_parents_table + .get(parent_hash) + .map_err(|e| BlockProcError::Custom(format!("Orphan parents lookup error: {}", e)))? 
+ .map(|v| v.value().to_vec()) + .unwrap_or_default(); + + // Add this orphan to the list + orphan_list.push(id.to_byte_array()); + + // Update the orphan parents table + orphan_parents_table + .insert(parent_hash, orphan_list) + .map_err(|e| { + BlockProcError::Custom(format!("Orphan parents update error: {}", e)) + })?; + + Ok(()) + }; + if let Err(e) = process() { if let Err(err) = db.abort() { - log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); + log::warn!( + target: NAME, + "Unable to abort failed orphan block storage transaction due to {err}" + ); }; return Err(e); } + db.commit()?; - Ok(count) + log::info!( + target: NAME, + "Successfully saved orphan block {} for later processing", + id + ); + + // Return 0 since we didn't process any transactions yet + Ok(0) + } + + // Process orphan blocks that depend on a given block + fn process_orphans(&mut self, parent_id: BlockHash) -> Result<(), BlockProcError> { + // First check if we have any orphans that depend on this block + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Read(tx))?; + let db = rx.recv()?; + + // Check orphan parents table + let orphan_parents_table = db + .open_table(TABLE_ORPHAN_PARENTS) + .map_err(|e| BlockProcError::Custom(format!("Orphan parents table error: {}", e)))?; + + let parent_hash = parent_id.to_byte_array(); + let orphans = orphan_parents_table + .get(parent_hash) + .map_err(|e| BlockProcError::Custom(format!("Orphan parents lookup error: {}", e)))?; + + // If no orphans depend on this block, we're done + if orphans.is_none() { + return Ok(()); + } + + // Get list of orphan block hashes + let orphan_hashes = orphans.unwrap().value().to_vec(); + + // Process each orphan block + let orphans_table = db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + let mut processed_orphans = Vec::with_capacity(orphan_hashes.len()); + + for orphan_hash in &orphan_hashes { + // Get the orphan block data + if let Some(orphan_block_data) = orphans_table + .get(orphan_hash) + .map_err(|e| BlockProcError::Custom(format!("Orphan lookup error: {}", e)))? 
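The iteration that follows stops at a `todo!()`. What belongs there is roughly the replay step sketched below, kept as comments because it depends on pieces outside this hunk (a hypothetical `decode_block` helper reversing `DbBlock`'s consensus encoding, and the processor's own entry point); patch 06 later replaces this recursive shape with an explicit work queue:

```rust
// Hypothetical replay body for the TODO below — a sketch, not the
// author's implementation:
//
//     let block = decode_block(&_block_data)?;          // DbBlock -> Block
//     let replayed = self.process_block(to_block_hash(*orphan_hash), block)?;
//     log::info!(target: NAME, "Replayed orphan with {replayed} transactions");
//
// Re-entering process_block from here recurses whenever a replayed orphan
// unlocks further orphans; the queue-based process_block_and_orphans in
// the next patch exists precisely to flatten that recursion.
```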
+ { + let (_block_data, _timestamp) = orphan_block_data.value(); + + // TODO: Implement + todo!(); + // Track that we processed this orphan + processed_orphans.push(orphan_hash.clone()); + } + } + + // Remove processed orphans from the database + if !processed_orphans.is_empty() { + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(tx))?; + let write_db = rx.recv()?; + + let remove_processed = || -> Result<(), BlockProcError> { + // Remove from orphan parents table + let mut orphan_parents_table = + write_db.open_table(TABLE_ORPHAN_PARENTS).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents table error: {}", e)) + })?; + + // Remove the whole entry if all orphans for this parent were processed + if orphan_hashes.len() == processed_orphans.len() { + orphan_parents_table.remove(parent_hash).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents removal error: {}", e)) + })?; + } else { + // Otherwise, update the list to remove the processed orphans + let remaining_orphans: Vec<[u8; 32]> = orphan_hashes + .into_iter() + .filter(|h| !processed_orphans.contains(&h)) + .collect(); + + orphan_parents_table + .insert(parent_hash, remaining_orphans) + .map_err(|e| { + BlockProcError::Custom(format!("Parent update error: {}", e)) + })?; + } + + // Remove from orphans table + let mut orphans_table = write_db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + for orphan_hash in &processed_orphans { + orphans_table.remove(*orphan_hash).map_err(|e| { + BlockProcError::Custom(format!("Orphan removal error: {}", e)) + })?; + } + + Ok(()) + }; + + if let Err(e) = remove_processed() { + if let Err(err) = write_db.abort() { + log::warn!( + target: NAME, + "Unable to abort failed orphan cleanup transaction due to {err}" + ); + }; + return Err(e); + } + + write_db.commit()?; + + log::info!( + target: NAME, + "Cleaned up {} processed orphan blocks", + processed_orphans.len() + ); + } + + Ok(()) + } + + // Count total number of orphan blocks + fn count_orphans(&self) -> Result { + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Read(tx))?; + let db = rx.recv()?; + + let orphans_table = db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + let count = orphans_table + .len() + .map_err(|e| BlockProcError::Custom(format!("Orphans table length error: {}", e)))?; + + Ok(count as usize) + } + + // Remove expired orphan blocks + fn clean_expired_orphans(&self) -> Result<(), BlockProcError> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or(Duration::from_secs(0)) + .as_secs(); + + let expiry_seconds = ORPHAN_EXPIRY_HOURS * 3600; + + // First read to find expired orphans + let (read_tx, read_rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Read(read_tx))?; + let read_db = read_rx.recv()?; + + let orphans_table = read_db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + let mut expired_orphans = Vec::new(); + + let orphans_iter = orphans_table.iter().map_err(|e| { + BlockProcError::Custom(format!("Failed to iterate orphans table: {}", e)) + })?; + + for entry in orphans_iter { + let (hash, data) = entry.map_err(|e| { + BlockProcError::Custom(format!("Failed to read orphan entry: {}", e)) + })?; + + let (_block_data, timestamp) = data.value(); + + // Check if orphan has expired + if now - timestamp > expiry_seconds { + 
expired_orphans.push(hash.value()); + } + } + + // If we have expired orphans, remove them + if !expired_orphans.is_empty() { + log::info!( + target: NAME, + "Found {} expired orphan blocks to clean up", + expired_orphans.len() + ); + + let (write_tx, write_rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(write_tx))?; + let write_db = write_rx.recv()?; + + let remove_expired = || -> Result<(), BlockProcError> { + let mut orphans_table = write_db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + let mut orphan_parents_table = + write_db.open_table(TABLE_ORPHAN_PARENTS).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents table error: {}", e)) + })?; + + for orphan_hash in &expired_orphans { + // Remove from orphans table + orphans_table.remove(orphan_hash).map_err(|e| { + BlockProcError::Custom(format!("Orphan removal error: {}", e)) + })?; + + // Also need to remove from parent mappings + // This is more complex as we need to scan all parent entries + let parents_iter = orphan_parents_table.iter().map_err(|e| { + BlockProcError::Custom(format!("Failed to iterate orphan parents: {}", e)) + })?; + + // First collect all parents to scan + let mut parents_to_scan = Vec::new(); + + for parent_entry in parents_iter { + let (parent_hash, orphans) = parent_entry.map_err(|e| { + BlockProcError::Custom(format!("Failed to read parent entry: {}", e)) + })?; + + // Store parent data for later processing + parents_to_scan + .push((parent_hash.value().clone(), orphans.value().to_vec())); + } + + // Now process parents without borrowing the table + for (parent_hash, orphans_list) in parents_to_scan { + // We need to iterate each orphan hash in the list and check if it + // matches our target + let mut found = false; + for list_hash in &orphans_list { + // Convert both to slices for comparison + if list_hash == orphan_hash { + found = true; + break; + } + } + + if found { + // Remove this orphan from the list + let updated_list: Vec<[u8; 32]> = orphans_list + .into_iter() + .filter(|h| h != orphan_hash) + .collect(); + + if updated_list.is_empty() { + // If no orphans left for this parent, remove the entry + orphan_parents_table.remove(parent_hash).map_err(|e| { + BlockProcError::Custom(format!("Parent removal error: {}", e)) + })?; + } else { + // Otherwise update with the filtered list + orphan_parents_table + .insert(parent_hash, updated_list) + .map_err(|e| { + BlockProcError::Custom(format!( + "Parent update error: {}", + e + )) + })?; + } + } + } + } + + Ok(()) + }; + + if let Err(e) = remove_expired() { + if let Err(err) = write_db.abort() { + log::warn!( + target: NAME, + "Unable to abort failed orphan cleanup transaction due to {err}" + ); + } + return Err(e); + } + + write_db.commit()?; + + log::info!( + target: NAME, + "Successfully removed {} expired orphan blocks", + expired_orphans.len() + ); + } + + Ok(()) } } @@ -424,6 +935,9 @@ pub enum BlockProcError { /// Unable to find block: {0} BlockLookup(StorageError), + /// Orphan block detected: parent block {0} not found + OrphanBlock(BlockHash), + /// Custom error: {0} Custom(String), } diff --git a/src/db.rs b/src/db.rs index 7e7a9f4..90c0d73 100644 --- a/src/db.rs +++ b/src/db.rs @@ -27,7 +27,7 @@ use std::path::Path; use amplify::num::u40; use amplify::{ByteArray, FromSliceError}; -use bpwallet::{BlockHeader, ConsensusDecode, ConsensusEncode, Tx}; +use bpwallet::{Block, BlockHeader, ConsensusDecode, ConsensusEncode, Tx}; use crossbeam_channel::{SendError, 
Sender}; use microservices::UService; use redb::{ @@ -93,6 +93,9 @@ impl ByteArray<5> for TxNo { #[derive(Wrapper, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug, From)] pub struct DbBlockHeader(#[from] BlockHeader); +#[derive(Wrapper, Clone, Eq, PartialEq, Debug, From)] +pub struct DbBlock(#[from] Block); + #[derive(Wrapper, Clone, Eq, PartialEq, Debug, From)] pub struct DbTx(#[from] Tx); @@ -140,6 +143,25 @@ impl redb::Value for DbBlockHeader { fn type_name() -> TypeName { TypeName::new("BpNodeBlockHeader") } } +impl redb::Value for DbBlock { + type SelfType<'a> = Self; + type AsBytes<'a> = Vec<u8>; + + fn fixed_width() -> Option<usize> { None } + + fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a> + where Self: 'a { + Self(unsafe { Block::consensus_deserialize(data).unwrap_unchecked() }) + } + + fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> + where Self: 'b { + value.0.consensus_serialize() + } + + fn type_name() -> TypeName { TypeName::new("BpNodeBlock") } +} + impl redb::Value for DbTx { type SelfType<'a> = Self; type AsBytes<'a> = Vec<u8>; @@ -192,7 +214,7 @@ pub const REC_ORPHANS: &str = "orphans"; pub const TABLE_MAIN: TableDefinition<&'static str, &[u8]> = TableDefinition::new("main"); // Maps block hash to block header -pub const TABLE_BLKS: TableDefinition<[u8; 32], DbBlockHeader> = TableDefinition::new("blocks"); +pub const TABLE_BLKS: TableDefinition<BlockId, DbBlockHeader> = TableDefinition::new("blocks"); // Maps transaction ID to internal transaction number pub const TABLE_TXIDS: TableDefinition<[u8; 32], TxNo> = TableDefinition::new("txids"); @@ -215,9 +237,16 @@ pub const TABLE_UTXOS: TableDefinition<(TxNo, u32), ()> = TableDefinition::new(" // Maps block height to block ID pub const TABLE_HEIGHTS: TableDefinition<u32, BlockId> = TableDefinition::new("block_heights"); +// Maps block ID to block height (reverse of TABLE_HEIGHTS) +pub const TABLE_BLOCK_HEIGHTS: TableDefinition<BlockId, u32> = + TableDefinition::new("blockid_height"); + // Maps transaction number to the block ID it belongs to pub const TABLE_TX_BLOCKS: TableDefinition<TxNo, BlockId> = TableDefinition::new("tx_blocks"); +// Maps block ID to all transaction numbers it contains +pub const TABLE_BLOCK_TXS: TableDefinition<BlockId, Vec<TxNo>> = TableDefinition::new("block_txs"); + // Maps transaction input to the output it spends pub const TABLE_INPUTS: TableDefinition<(TxNo, u32), (TxNo, u32)> = TableDefinit pub const TABLE_BLOCK_SPENDS: TableDefinition<BlockId, Vec<(TxNo, u32)>> = TableDefinition::new("block_spends"); +// Stores orphan blocks (blocks whose parent block is not yet processed) +// Maps block hash to serialized block data +pub const TABLE_ORPHANS: TableDefinition<[u8; 32], (DbBlock, u64)> = + TableDefinition::new("orphans"); + +// Maps orphan block's parent hash to orphan block hash +// This allows quick lookup of orphan blocks when their parent is processed +pub const TABLE_ORPHAN_PARENTS: TableDefinition<[u8; 32], Vec<[u8; 32]>> = + TableDefinition::new("orphan_parents"); + // Each BP-Node instance is designed to work with a single blockchain network. // If multiple networks need to be indexed, separate instances should be used // with different data directories.
The network information is stored in the From c5c9e15dee42590e2eb47a69006a92ff0edc4f59 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Tue, 15 Apr 2025 00:36:20 +0800 Subject: [PATCH 06/21] feat: improve orphan block handling with delayed removal Signed-off-by: will-bitlightlabs --- src/blocks.rs | 440 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 305 insertions(+), 135 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index a4c835e..d6632b9 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -28,7 +28,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use amplify::{ByteArray, FromSliceError}; use bprpc::BloomFilter32; -use bpwallet::{Block, BlockHash, ConsensusDecode, ConsensusEncode}; +use bpwallet::{Block, BlockHash}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; use redb::{CommitError, ReadableTable, ReadableTableMetadata, StorageError, TableError}; @@ -488,9 +488,8 @@ impl BlockProcessor { // Successful processing db.commit()?; - // After successful processing, check if we have any orphans that depend on this - // block - self.process_orphans(id)?; + // IMPORTANT: No longer calling process_orphans here to avoid recursion + // This is now handled by process_block_and_orphans // Final log message log::debug!( @@ -505,6 +504,287 @@ impl BlockProcessor { } } + /// Process a block and all its dependent orphans in an iterative manner to avoid stack + /// overflow. + /// + /// This method should be used instead of directly calling `process_block` when you want to + /// ensure that orphan blocks dependent on the processed block are also handled. + /// + /// # Example + /// ```no_run + /// let processor = BlockProcessor::new(db, broker); + /// // Process a block and its dependent orphans + /// processor.process_block_and_orphans(block_hash, block)?; + /// ``` + pub fn process_block_and_orphans( + &mut self, + id: BlockHash, + block: Block, + ) -> Result { + // Create a queue to store blocks that need to be processed + // Store (block_hash, block, parent_hash) tuples + let mut pending_blocks = std::collections::VecDeque::new(); + pending_blocks.push_back((id, block, None)); + + let mut total_processed = 0; + + // Process blocks in a loop rather than recursive calls + while let Some((current_id, current_block, parent_hash)) = pending_blocks.pop_front() { + // Process the current block + match self.process_block(current_id, current_block) { + Ok(count) => { + total_processed += count; + + // If this was an orphan block (has a parent_hash), remove it from the orphan + // pool + if let Some(parent) = parent_hash { + // Only remove this specific orphan after successful processing + if let Err(e) = self.remove_processed_orphans(parent, &[current_id]) { + log::warn!( + target: NAME, + "Failed to remove processed orphan {}: {}", + current_id, + e + ); + } else { + log::info!( + target: NAME, + "Successfully removed processed orphan {} from pool", + current_id + ); + } + } + + // Find orphans that depend on this block + if let Ok(orphans) = self.find_dependent_orphans(current_id) { + // Skip if no orphans found + if !orphans.is_empty() { + // Add them to the queue for processing + for (orphan_id, orphan_block) in orphans { + log::info!( + target: NAME, + "Adding orphan block {} to processing queue", + orphan_id + ); + + // Add to the queue for processing + // Include the parent hash so we can remove it from orphan pool + // after processing + pending_blocks.push_back(( + orphan_id, + orphan_block, + Some(current_id), + )); + } + } 
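+                    // Bound sketch (an added editorial assertion, assuming the
+                    // orphan pool honours the MAX_ORPHAN_BLOCKS cap): every queued
+                    // entry is either the block that seeded this call or was loaded
+                    // from TABLE_ORPHANS, so the queue cannot outgrow the pool by
+                    // more than the seed block itself.
+                    debug_assert!(pending_blocks.len() <= MAX_ORPHAN_BLOCKS + 1);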
+ } + } + Err(e) => { + // For orphan blocks, we just continue with the next block + if let BlockProcError::OrphanBlock(_) = e { + log::debug!( + target: NAME, + "Orphan block {} will be processed later when its parent is available", + current_id + ); + continue; + } + + // For other errors, log and return the error + log::error!( + target: NAME, + "Error processing block {}: {}", + current_id, + e + ); + return Err(e); + } + } + } + + Ok(total_processed) + } + + // Helper method to find orphans that depend on a specific block + fn find_dependent_orphans( + &self, + parent_id: BlockHash, + ) -> Result, BlockProcError> { + // First check if we have any orphans that depend on this block + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Read(tx))?; + let db = rx.recv()?; + + // Check orphan parents table + let orphan_parents_table = db + .open_table(TABLE_ORPHAN_PARENTS) + .map_err(|e| BlockProcError::Custom(format!("Orphan parents table error: {}", e)))?; + + let parent_hash = parent_id.to_byte_array(); + let orphans = orphan_parents_table + .get(parent_hash) + .map_err(|e| BlockProcError::Custom(format!("Orphan parents lookup error: {}", e)))?; + + // If no orphans depend on this block, return empty list + if orphans.is_none() { + return Ok(Vec::new()); + } + + // Get list of orphan block hashes + let orphan_hashes = orphans.unwrap().value(); + + // Get orphans data + let orphans_table = db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + let mut dependent_orphans = Vec::with_capacity(orphan_hashes.len()); + + for orphan_hash in &orphan_hashes { + // Get the orphan block data + if let Some(orphan_block_data) = orphans_table + .get(orphan_hash) + .map_err(|e| BlockProcError::Custom(format!("Orphan lookup error: {}", e)))? 
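+            // The value fetched here is the (DbBlock, u64) pair that TABLE_ORPHANS
+            // stores: the block itself plus the unix timestamp recorded when the
+            // orphan was saved. Only the block half is used below; the timestamp
+            // is consumed by clean_expired_orphans to expire stale entries.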
+ { + let (block_data, _timestamp) = orphan_block_data.value(); + + // Extract the Block object and create a BlockHash + let block = Block::from(block_data); + let block_hash = BlockHash::from_byte_array(*orphan_hash); + debug_assert_eq!(block.block_hash(), block_hash); + + dependent_orphans.push((block_hash, block)); + + log::info!( + target: NAME, + "Found orphan block {} with parent {}", + block_hash, + parent_id + ); + } + } + + // We don't remove orphans here - they'll be removed after successful processing + if !dependent_orphans.is_empty() { + log::info!( + target: NAME, + "Found {} orphan blocks dependent on block {}", + dependent_orphans.len(), + parent_id + ); + } + + Ok(dependent_orphans) + } + + // Modified to remove orphans after they've been processed + fn remove_processed_orphans( + &mut self, + parent_id: BlockHash, + processed: &[BlockHash], + ) -> Result<(), BlockProcError> { + if processed.is_empty() { + return Ok(()); + } + + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(tx))?; + let write_db = rx.recv()?; + + let remove_orphans = || -> Result<(), BlockProcError> { + // Remove from orphan parents table + let mut orphan_parents_table = + write_db.open_table(TABLE_ORPHAN_PARENTS).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents table error: {}", e)) + })?; + + // Get the current list of orphans for this parent + let parent_hash = parent_id.to_byte_array(); + + // Get orphan list and immediately convert to Vec to drop the borrow + let orphan_hashes = { + let orphans = orphan_parents_table.get(parent_hash).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents lookup error: {}", e)) + })?; + + if let Some(orphans_record) = orphans { + orphans_record.value().to_vec() + } else { + // No orphans found for this parent, nothing to do + return Ok(()); + } + }; + + // Filter out processed orphans + let remaining_orphans: Vec<[u8; 32]> = orphan_hashes + .into_iter() + .filter(|h| !processed.iter().any(|p| p.to_byte_array() == *h)) + .collect(); + + // Update or remove the entry + if remaining_orphans.is_empty() { + orphan_parents_table.remove(parent_hash).map_err(|e| { + BlockProcError::Custom(format!("Orphan parents removal error: {}", e)) + })?; + + log::debug!( + target: NAME, + "Removed all orphans for parent block {}", + parent_id + ); + } else { + orphan_parents_table + .insert(parent_hash, remaining_orphans) + .map_err(|e| BlockProcError::Custom(format!("Parent update error: {}", e)))?; + + log::debug!( + target: NAME, + "Updated orphan list for parent block {}", + parent_id + ); + } + + // Remove from orphans table + let mut orphans_table = write_db + .open_table(TABLE_ORPHANS) + .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; + + for orphan_hash in processed { + orphans_table + .remove(orphan_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Orphan removal error: {}", e)))?; + + log::debug!( + target: NAME, + "Removed orphan block {} from orphans table", + orphan_hash + ); + } + + Ok(()) + }; + + if let Err(e) = remove_orphans() { + if let Err(err) = write_db.abort() { + log::warn!( + target: NAME, + "Unable to abort failed orphan cleanup transaction due to {err}" + ); + } + return Err(e); + } + + write_db.commit()?; + + log::info!( + target: NAME, + "Successfully removed {} processed orphan blocks", + processed.len() + ); + + Ok(()) + } + // Save an orphan block for later processing fn save_orphan_block(&self, id: BlockHash, block: Block) -> Result { log::info!( @@ -598,121 
+878,6 @@ impl BlockProcessor { Ok(0) } - // Process orphan blocks that depend on a given block - fn process_orphans(&mut self, parent_id: BlockHash) -> Result<(), BlockProcError> { - // First check if we have any orphans that depend on this block - let (tx, rx) = crossbeam_channel::bounded(1); - self.db.send(DbMsg::Read(tx))?; - let db = rx.recv()?; - - // Check orphan parents table - let orphan_parents_table = db - .open_table(TABLE_ORPHAN_PARENTS) - .map_err(|e| BlockProcError::Custom(format!("Orphan parents table error: {}", e)))?; - - let parent_hash = parent_id.to_byte_array(); - let orphans = orphan_parents_table - .get(parent_hash) - .map_err(|e| BlockProcError::Custom(format!("Orphan parents lookup error: {}", e)))?; - - // If no orphans depend on this block, we're done - if orphans.is_none() { - return Ok(()); - } - - // Get list of orphan block hashes - let orphan_hashes = orphans.unwrap().value().to_vec(); - - // Process each orphan block - let orphans_table = db - .open_table(TABLE_ORPHANS) - .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; - - let mut processed_orphans = Vec::with_capacity(orphan_hashes.len()); - - for orphan_hash in &orphan_hashes { - // Get the orphan block data - if let Some(orphan_block_data) = orphans_table - .get(orphan_hash) - .map_err(|e| BlockProcError::Custom(format!("Orphan lookup error: {}", e)))? - { - let (_block_data, _timestamp) = orphan_block_data.value(); - - // TODO: Implement - todo!(); - // Track that we processed this orphan - processed_orphans.push(orphan_hash.clone()); - } - } - - // Remove processed orphans from the database - if !processed_orphans.is_empty() { - let (tx, rx) = crossbeam_channel::bounded(1); - self.db.send(DbMsg::Write(tx))?; - let write_db = rx.recv()?; - - let remove_processed = || -> Result<(), BlockProcError> { - // Remove from orphan parents table - let mut orphan_parents_table = - write_db.open_table(TABLE_ORPHAN_PARENTS).map_err(|e| { - BlockProcError::Custom(format!("Orphan parents table error: {}", e)) - })?; - - // Remove the whole entry if all orphans for this parent were processed - if orphan_hashes.len() == processed_orphans.len() { - orphan_parents_table.remove(parent_hash).map_err(|e| { - BlockProcError::Custom(format!("Orphan parents removal error: {}", e)) - })?; - } else { - // Otherwise, update the list to remove the processed orphans - let remaining_orphans: Vec<[u8; 32]> = orphan_hashes - .into_iter() - .filter(|h| !processed_orphans.contains(&h)) - .collect(); - - orphan_parents_table - .insert(parent_hash, remaining_orphans) - .map_err(|e| { - BlockProcError::Custom(format!("Parent update error: {}", e)) - })?; - } - - // Remove from orphans table - let mut orphans_table = write_db - .open_table(TABLE_ORPHANS) - .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; - - for orphan_hash in &processed_orphans { - orphans_table.remove(*orphan_hash).map_err(|e| { - BlockProcError::Custom(format!("Orphan removal error: {}", e)) - })?; - } - - Ok(()) - }; - - if let Err(e) = remove_processed() { - if let Err(err) = write_db.abort() { - log::warn!( - target: NAME, - "Unable to abort failed orphan cleanup transaction due to {err}" - ); - }; - return Err(e); - } - - write_db.commit()?; - - log::info!( - target: NAME, - "Cleaned up {} processed orphan blocks", - processed_orphans.len() - ); - } - - Ok(()) - } - // Count total number of orphan blocks fn count_orphans(&self) -> Result { let (tx, rx) = crossbeam_channel::bounded(1); @@ -723,51 +888,56 @@ 
impl BlockProcessor { .open_table(TABLE_ORPHANS) .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; - let count = orphans_table + let count: usize = orphans_table .len() - .map_err(|e| BlockProcError::Custom(format!("Orphans table length error: {}", e)))?; + .map_err(|e| BlockProcError::Custom(format!("Failed to count orphans: {}", e)))? + as usize; - Ok(count as usize) + Ok(count) } - // Remove expired orphan blocks + // Remove orphan blocks that have been in the pool for too long fn clean_expired_orphans(&self) -> Result<(), BlockProcError> { + log::debug!(target: NAME, "Checking for expired orphan blocks..."); + + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Read(tx))?; + let db = rx.recv()?; + let now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or(Duration::from_secs(0)) .as_secs(); - let expiry_seconds = ORPHAN_EXPIRY_HOURS * 3600; - - // First read to find expired orphans - let (read_tx, read_rx) = crossbeam_channel::bounded(1); - self.db.send(DbMsg::Read(read_tx))?; - let read_db = read_rx.recv()?; + // Calculate expiry threshold + let expiry_secs = ORPHAN_EXPIRY_HOURS * 3600; + let expiry_threshold = now.saturating_sub(expiry_secs); - let orphans_table = read_db + // Find expired orphans + let orphans_table = db .open_table(TABLE_ORPHANS) .map_err(|e| BlockProcError::Custom(format!("Orphans table error: {}", e)))?; let mut expired_orphans = Vec::new(); + // Scan all orphans let orphans_iter = orphans_table.iter().map_err(|e| { BlockProcError::Custom(format!("Failed to iterate orphans table: {}", e)) })?; - for entry in orphans_iter { - let (hash, data) = entry.map_err(|e| { + for orphan_entry in orphans_iter { + let (orphan_hash, data) = orphan_entry.map_err(|e| { BlockProcError::Custom(format!("Failed to read orphan entry: {}", e)) })?; let (_block_data, timestamp) = data.value(); // Check if orphan has expired - if now - timestamp > expiry_seconds { - expired_orphans.push(hash.value()); + if timestamp < expiry_threshold { + expired_orphans.push(orphan_hash.value().clone()); } } - // If we have expired orphans, remove them if !expired_orphans.is_empty() { log::info!( target: NAME, From 659b9b88fc483b51ba03e97e6ffb47aaf7108283 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Wed, 16 Apr 2025 01:02:11 +0800 Subject: [PATCH 07/21] feat: (WIP) Implementation of blockchain reorganization logic Signed-off-by: will-bitlightlabs --- src/blocks.rs | 1121 +++++++++++++++++++++++++++++++++++++++++++------ src/db.rs | 43 +- src/lib.rs | 3 +- 3 files changed, 1033 insertions(+), 134 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index d6632b9..ef19083 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -31,21 +31,21 @@ use bprpc::BloomFilter32; use bpwallet::{Block, BlockHash}; use crossbeam_channel::{RecvError, SendError, Sender}; use microservices::USender; -use redb::{CommitError, ReadableTable, ReadableTableMetadata, StorageError, TableError}; +use redb::{ + CommitError, ReadableTable, ReadableTableMetadata, StorageError, TableError, WriteTransaction, +}; use crate::ImporterMsg; use crate::db::{ - BlockId, DbBlock, DbBlockHeader, DbMsg, DbTx, REC_BLOCKID, REC_TXNO, TABLE_BLKS, - TABLE_BLOCK_HEIGHTS, TABLE_BLOCK_SPENDS, TABLE_BLOCK_TXS, TABLE_BLOCKIDS, TABLE_HEIGHTS, - TABLE_INPUTS, TABLE_MAIN, TABLE_ORPHAN_PARENTS, TABLE_ORPHANS, TABLE_OUTS, TABLE_SPKS, - TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, TxNo, + BlockId, DbBlock, DbBlockHeader, DbMsg, DbTx, ForkId, REC_BLOCKID, REC_FORK_ID, REC_TXNO, + 
TABLE_BLKS, TABLE_BLOCK_HEIGHTS, TABLE_BLOCK_SPENDS, TABLE_BLOCK_TXS, TABLE_BLOCKIDS, + TABLE_FORK_TIPS, TABLE_FORKS, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, TABLE_ORPHAN_PARENTS, + TABLE_ORPHANS, TABLE_OUTS, TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS, + TxNo, }; const NAME: &str = "blockproc"; -// Network information record in main table -pub const REC_NETWORK: &str = "network"; - // Constants for orphan block management const MAX_ORPHAN_BLOCKS: usize = 100; // Orphan blocks expire after 24 hours @@ -110,41 +110,46 @@ impl BlockProcessor { .open_table(TABLE_BLOCK_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; - if let Some(prev_height_record) = block_heights_table + let height = block_heights_table .get(prev_blockid) .map_err(|e| BlockProcError::Custom(format!("Block height lookup error: {}", e)))? - { - let prev_height = prev_height_record.value(); - return Ok(prev_height + 1); - } + .map(|v| { + let prev_height = v.value(); + prev_height + 1 + }) + .ok_or_else(|| { + BlockProcError::Custom(format!( + "Database inconsistency: Previous block with ID {} found in blockids table \ + but not in any height table", + prev_blockid + )) + })?; - // If not found in the direct mapping table, check the height -> blockid table + // Store block height information let heights_table = db .open_table(TABLE_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - // Scan the heights table to find the previous block ID - let heights_iter = heights_table - .iter() - .map_err(|e| BlockProcError::Custom(format!("Heights table iterator error: {}", e)))?; - - for height_entry in heights_iter { - let (height, block_id) = height_entry - .map_err(|e| BlockProcError::Custom(format!("Heights entry error: {}", e)))?; + // Check if we already have a block at this height + if let Some(existing_blockid) = heights_table + .get(height) + .map_err(|e| BlockProcError::Custom(format!("Heights lookup error: {}", e)))? + .map(|v| v.value()) + { + log::warn!( + target: NAME, + "Detected potential chain fork at height {}: existing block ID {}", + height, + existing_blockid, + ); - if block_id.value() == prev_blockid { - // Previous block's height + 1 is the current block's height - return Ok(height.value() + 1); - } + return Err(BlockProcError::PotentialFork( + block.block_hash(), + height, + existing_blockid, + )); } - - // If we couldn't find the previous block in either height table, - // this is an error condition as the database is in an inconsistent state - Err(BlockProcError::Custom(format!( - "Database inconsistency: Previous block with ID {} found in blockids table but not in \ - any height table", - prev_blockid - ))) + Ok(height) } pub fn process_block(&mut self, id: BlockHash, block: Block) -> Result { @@ -170,37 +175,13 @@ impl BlockProcessor { TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)? }; - // Get or create the next block ID - let mut blockid = { - let main = db - .open_table(TABLE_MAIN) - .map_err(BlockProcError::MainTable)?; - match main - .get(REC_BLOCKID) - .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? 
- { - Some(rec) => { - // Parse bytes into BlockId using from_bytes method - BlockId::from_bytes(rec.value()) - } - None => BlockId::start(), - } - }; - let mut count = 0; let process = || -> Result<(), BlockProcError> { // Calculate the block height based on previous block - // This function will also detect orphan blocks - let height = match self.calculate_block_height(&block) { - Ok(h) => h, - Err(BlockProcError::OrphanBlock(_)) => { - // If we detect an orphan block, abort this transaction and save the orphan - return Err(BlockProcError::OrphanBlock(parent_hash)); - } - Err(e) => return Err(e), - }; + // This function will also detect orphan blocks and potential forks + let height = self.calculate_block_height(&block)?; - blockid.inc_assign(); + let blockid = self.get_next_block_id(&db)?; // Store block header let mut table = db @@ -226,45 +207,9 @@ impl BlockProcessor { blockid ); - // Store block height information let mut heights_table = db .open_table(TABLE_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - - // Check if we already have a block at this height - if let Some(existing_blockid) = heights_table - .get(height) - .map_err(|e| BlockProcError::Custom(format!("Heights lookup error: {}", e)))? - .map(|v| v.value()) - { - // If different block at this height, we have a potential reorg - if existing_blockid != blockid { - log::warn!( - target: NAME, - "Detected potential chain reorganization at height {}: replacing block ID {} with {}", - height, - existing_blockid, - blockid - ); - - // TODO: Implement full reorg handling - // In a single-chain BP-Node instance, reorgs are detected when a different - // block is encountered at the same height. The proper handling would include: - // 1. Finding the common ancestor block - // 2. Rolling back transactions in the old chain branch - // 3. Applying transactions from the new chain branch - // 4. 
Updating UTXO set accordingly - - // When implementing reorg, make sure to update both height tables: - // - TABLE_HEIGHTS: height -> blockid mapping - // - TABLE_BLOCK_HEIGHTS: blockid -> height mapping - - // For now, we'll just overwrite the existing entry - // This simple approach doesn't handle the full reorg properly - // but ensures the database doesn't get into an inconsistent state - } - } - + .map_err(BlockProcError::HeightsTable)?; heights_table .insert(height, blockid) .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; @@ -444,10 +389,6 @@ impl BlockProcessor { main.insert(REC_TXNO, txno.to_byte_array().as_slice()) .map_err(BlockProcError::TxNoUpdate)?; - // Update block ID counter - main.insert(REC_BLOCKID, &blockid.to_bytes().as_slice()) - .map_err(|e| BlockProcError::Custom(format!("Block ID update error: {}", e)))?; - // Log successful block processing log::debug!( target: NAME, @@ -461,6 +402,18 @@ impl BlockProcessor { }; match process() { + Ok(()) => { + db.commit()?; + + log::debug!( + target: NAME, + "Successfully processed block {} with {} transactions", + id, + count + ); + + Ok(count) + } Err(BlockProcError::OrphanBlock(_)) => { // Handle orphan block case if let Err(err) = db.abort() { @@ -477,6 +430,24 @@ impl BlockProcessor { return self.save_orphan_block(id, block_clone); } + Err(BlockProcError::PotentialFork(new_block_hash, height, existing_blockid)) => { + // Handle potential fork case + if let Err(err) = db.abort() { + log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); + }; + + // Record this as a potential fork for later verification + // Store the new block but don't update the height tables yet + // We'll only perform a reorganization if this fork becomes the longest chain + // TODO: store the new block in the database + self.process_potential_fork(id, &block_clone, height, existing_blockid)?; + + return Err(BlockProcError::PotentialFork( + new_block_hash, + height, + existing_blockid, + )); + } Err(e) => { // Handle other errors if let Err(err) = db.abort() { @@ -484,23 +455,6 @@ impl BlockProcessor { }; return Err(e); } - Ok(()) => { - // Successful processing - db.commit()?; - - // IMPORTANT: No longer calling process_orphans here to avoid recursion - // This is now handled by process_block_and_orphans - - // Final log message - log::debug!( - target: NAME, - "Successfully processed block {} with {} transactions", - id, - count - ); - - Ok(count) - } } } @@ -1048,6 +1002,929 @@ impl BlockProcessor { Ok(()) } + + /// Process a block that might create a fork in the blockchain. + /// This method records fork information and checks if we need to perform a chain + /// reorganization. + fn process_potential_fork( + &mut self, + block_hash: BlockHash, + block: &Block, + height: u32, + existing_blockid: BlockId, + ) -> Result<(), BlockProcError> { + let (tx, rx) = crossbeam_channel::bounded(1); + self.db.send(DbMsg::Write(tx))?; + let db = rx.recv()?; + + let new_blockid = self.get_next_block_id(&db)?; + // First, check if this block is extending an existing fork + let fork_id = if let Some(parent_fork_id) = + self.find_fork_by_block_hash(&db, block.header.prev_block_hash)? 
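+        // Fork membership is tracked tip-by-tip: TABLE_FORK_TIPS indexes only the
+        // current tip hash of each fork, so a block extends a fork exactly when
+        // its prev_block_hash matches a stored tip; schematically:
+        //     fork_tips = { tip_hash_a -> fork 1 }
+        //     block.prev_block_hash == tip_hash_a  =>  block extends fork 1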
+ { + // This block extends an existing fork + log::info!( + target: NAME, + "Block {} at height {} extends existing fork {}", + block_hash, + height, + parent_fork_id + ); + + // Update the fork with this new block + self.update_fork(&db, parent_fork_id, height, new_blockid, block_hash)?; + + parent_fork_id + } else { + // This block might start a new fork + // First check if its parent is in the main chain + if self.is_block_in_main_chain(&db, block.header.prev_block_hash)? { + // Create a new fork + let new_fork_id = match self.record_fork( + &db, + height, + existing_blockid, + new_blockid, + block_hash, + )? { + Some(id) => id, + None => { + // This shouldn't happen, but handle it gracefully + log::warn!( + target: NAME, + "Failed to create new fork for block {} at height {}", + block_hash, + height + ); + return Ok(()); + } + }; + + new_fork_id + } else { + // Parent is not in main chain and not in a known fork + // This could be an orphan or invalid block + log::warn!( + target: NAME, + "Block {} at height {} is disconnected: parent {} not found in main chain or forks", + block_hash, + height, + block.header.prev_block_hash + ); + return Ok(()); + } + }; + + // Check if this fork is now longer than the main chain + self.check_fork_length(&db, fork_id)?; + + db.commit()?; + + Ok(()) + } + + /// Check if a fork is longer than the main chain and perform reorganization if needed + fn check_fork_length( + &mut self, + db: &WriteTransaction, + fork_id: ForkId, + ) -> Result<(), BlockProcError> { + // Get fork information + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let fork_info = match forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? + { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Fork {} not found in database", + fork_id + ))); + } + }; + + let (_fork_start_height, _fork_tip_id, fork_height) = fork_info; + + // Get main chain height + let main_chain_height = self.get_main_chain_height(db)?; + + // If fork is longer than main chain, perform reorganization + if fork_height > main_chain_height { + log::info!( + target: NAME, + "Fork {} is longer than main chain ({} > {}), initiating chain reorganization", + fork_id, + fork_height, + main_chain_height + ); + + // Perform chain reorganization + self.perform_chain_reorganization(db, fork_id)?; + } else { + log::debug!( + target: NAME, + "Fork {} is not longer than main chain ({} <= {}), no reorganization needed", + fork_id, + fork_height, + main_chain_height + ); + } + + Ok(()) + } + + /// Perform a chain reorganization to adopt a fork as the new main chain + fn perform_chain_reorganization( + &mut self, + db: &WriteTransaction, + fork_id: ForkId, + ) -> Result<(), BlockProcError> { + // Get fork information + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let fork_info = match forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? 
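+        // The record decoded below is the (fork_start_height, tip_block_id,
+        // current_height) triple that TABLE_FORKS stores; a fork that split at
+        // height 100 and has since grown by two blocks reads, schematically,
+        //     (100, tip_block_id, 102)
+        // so current_height alone suffices for the length comparison below.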
+ { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Fork {} not found in database", + fork_id + ))); + } + }; + + let (fork_start_height, fork_tip_id, fork_height) = fork_info; + + log::info!( + target: NAME, + "Starting chain reorganization: Fork {} from height {} to {} with tip block {}", + fork_id, + fork_start_height, + fork_height, + fork_tip_id + ); + + // 1. Find the common ancestor (could be the fork_start_height - 1) + let common_ancestor_height = fork_start_height; + + // 2. Get blocks to rollback from main chain + let main_chain_height = self.get_main_chain_height(db)?; + let blocks_to_rollback = + self.get_blocks_to_rollback(db, common_ancestor_height, main_chain_height)?; + + // 3. Get blocks to apply from fork chain + let blocks_to_apply = + self.get_blocks_to_apply(db, fork_id, common_ancestor_height, fork_height)?; + + log::info!( + target: NAME, + "Chain reorganization: rolling back {} blocks and applying {} blocks", + blocks_to_rollback.len(), + blocks_to_apply.len() + ); + + // 4. Roll back blocks from main chain + self.rollback_blocks(db, &blocks_to_rollback)?; + + // 5. Apply blocks from fork chain + self.apply_blocks(db, &blocks_to_apply)?; + + // 6. Update fork status + self.cleanup_after_reorg(db, fork_id)?; + + log::info!( + target: NAME, + "Chain reorganization complete: new chain height is {}", + fork_height + ); + + Ok(()) + } + + /// Records a potential fork in the blockchain. + /// This happens when we discover two different blocks at the same height. + fn record_fork( + &self, + db: &WriteTransaction, + height: u32, + existing_blockid: BlockId, + new_blockid: BlockId, + new_block_hash: BlockHash, + ) -> Result, BlockProcError> { + // Check if this block is already part of a known fork + if let Some(fork_id) = self.find_fork_by_block_hash(db, new_block_hash)? { + log::debug!( + target: NAME, + "Block {} at height {} is already part of fork {}", + new_block_hash, + height, + fork_id + ); + return Ok(None); + } + + // Generate a new fork ID + let fork_id = self.get_next_fork_id(db)?; + + // Record the fork in the forks table + let mut forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + // A fork starts at the current height + forks_table + .insert(fork_id, (height, new_blockid, height)) + .map_err(|e| BlockProcError::Custom(format!("Fork insertion error: {}", e)))?; + + // Map the fork tip hash to the fork ID + let mut fork_tips_table = db + .open_table(TABLE_FORK_TIPS) + .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; + + fork_tips_table + .insert(new_block_hash.to_byte_array(), fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork tip mapping error: {}", e)))?; + + log::info!( + target: NAME, + "Created new fork {} at height {}: Main chain block {} vs Fork block {}", + fork_id, + height, + existing_blockid, + new_blockid + ); + + Ok(Some(fork_id)) + } + + /// Gets the next available block ID and increments the counter + fn get_next_block_id(&self, db: &WriteTransaction) -> Result { + let mut main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + let mut block_id = match main + .get(REC_BLOCKID) + .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? 
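+        // Worked example of the reorganization below (illustrative numbers): with
+        // the main chain at height 103 and a fork record (101, tip, 104), blocks
+        // at heights 103, 102, 101 are rolled back (highest first) and the fork's
+        // blocks for heights 101..=104 are applied (lowest first), so the height
+        // tables shrink from the tip and regrow from the split point.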
+ { + Some(rec) => BlockId::from_bytes(rec.value()), + None => BlockId::start(), + }; + + block_id.inc_assign(); + main.insert(REC_BLOCKID, block_id.to_bytes().as_slice()) + .map_err(|e| BlockProcError::Custom(format!("Block ID update error: {}", e)))?; + + Ok(block_id) + } + + /// Gets the next available fork ID and increments the counter + fn get_next_fork_id(&self, db: &WriteTransaction) -> Result { + let mut main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + + let mut fork_id = { + match main + .get(REC_FORK_ID) + .map_err(|e| BlockProcError::Custom(format!("Fork ID lookup error: {}", e)))? + { + Some(rec) => ForkId::from_bytes(rec.value()), + None => ForkId::start(), + } + }; + fork_id.inc_assign(); + main.insert(REC_FORK_ID, fork_id.to_bytes().as_slice()) + .map_err(|e| BlockProcError::Custom(format!("Fork ID update error: {}", e)))?; + + Ok(fork_id) + } + + /// Find fork ID by block hash + fn find_fork_by_block_hash( + &self, + db: &WriteTransaction, + block_hash: BlockHash, + ) -> Result, BlockProcError> { + let fork_tips_table = db + .open_table(TABLE_FORK_TIPS) + .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; + + if let Some(fork_id_record) = fork_tips_table + .get(block_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Fork tip lookup error: {}", e)))? + { + return Ok(Some(fork_id_record.value())); + } + + Ok(None) + } + + /// Update fork information with a new block + fn update_fork( + &self, + db: &WriteTransaction, + fork_id: ForkId, + new_height: u32, + new_block_id: BlockId, + new_block_hash: BlockHash, + ) -> Result<(), BlockProcError> { + // Update the fork record + let mut forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + // Get current fork info + let fork_info = match forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? + { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Fork {} not found in database", + fork_id + ))); + } + }; + + let (start_height, _old_tip_id, _old_height) = fork_info; + + // Update fork with new tip and height + forks_table + .insert(fork_id, (start_height, new_block_id, new_height)) + .map_err(|e| BlockProcError::Custom(format!("Fork update error: {}", e)))?; + + // Update the fork tip mapping + let mut fork_tips_table = db + .open_table(TABLE_FORK_TIPS) + .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; + + // Remove old tip mapping if it exists (need to look it up first) + if let Some(old_tip_hash) = self.find_fork_tip_hash(db, fork_id)? 
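+        // Only one hash per fork lives in TABLE_FORK_TIPS (it indexes tips, not
+        // whole branches), so extending a fork swaps the mapping; schematically:
+        //     remove(old_tip_hash -> fork N); insert(new_tip_hash -> fork N)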
{ + fork_tips_table + .remove(old_tip_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?; + } + + // Add new tip mapping + fork_tips_table + .insert(new_block_hash.to_byte_array(), fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork tip mapping error: {}", e)))?; + + log::debug!( + target: NAME, + "Updated fork {}: new height {}, new tip {}", + fork_id, + new_height, + new_block_id + ); + + Ok(()) + } + + /// Find the hash of a fork's tip block + fn find_fork_tip_hash( + &self, + db: &WriteTransaction, + fork_id: ForkId, + ) -> Result, BlockProcError> { + // Get fork info + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + if let Some(fork_record) = forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? + { + let (_start_height, tip_id, _height) = fork_record.value(); + + // Find the block hash for this block ID + // This requires iterating through the blockids table + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let iter = blockids_table + .iter() + .map_err(|e| BlockProcError::Custom(format!("Block IDs iterator error: {}", e)))?; + + for entry in iter { + let (hash, id) = entry + .map_err(|e| BlockProcError::Custom(format!("Block ID entry error: {}", e)))?; + + if id.value() == tip_id { + // Found the block hash + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(hash.value().as_slice()); + return Ok(Some(BlockHash::from_byte_array(hash_array))); + } + } + } + + Ok(None) + } + + /// Get the current height of the main chain + fn get_main_chain_height(&self, db: &WriteTransaction) -> Result { + // Find the maximum height in the heights table + let heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + let mut max_height = 0; + let iter = heights_table + .iter() + .map_err(|e| BlockProcError::Custom(format!("Heights iterator error: {}", e)))?; + + for entry in iter { + let (height, _) = + entry.map_err(|e| BlockProcError::Custom(format!("Heights entry error: {}", e)))?; + + let h = height.value(); + if h > max_height { + max_height = h; + } + } + + Ok(max_height) + } + + /// Check if a block with the given hash is in the main chain + fn is_block_in_main_chain( + &self, + db: &WriteTransaction, + block_hash: BlockHash, + ) -> Result { + // Look up the block ID + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let block_id = match blockids_table + .get(block_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? + { + Some(id_record) => id_record.value(), + None => return Ok(false), // Block not found + }; + + // Check if this block ID has a height entry + let block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + + if block_heights_table + .get(block_id) + .map_err(|e| BlockProcError::Custom(format!("Block height lookup error: {}", e)))? 
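+            // Two-hop membership test: hash -> BlockId via TABLE_BLOCKIDS, then
+            // BlockId -> height via TABLE_BLOCK_HEIGHTS. Fork blocks receive an ID
+            // but no height record, and orphans receive neither, so a present
+            // height record implies the block is on the main chain.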
+ .is_some() + { + return Ok(true); // Block has a height, so it's in the main chain + } + + Ok(false) + } + + /// Get blocks that need to be rolled back from the main chain + /// Returns a list of (height, block_id) pairs, from highest to lowest height + fn get_blocks_to_rollback( + &self, + db: &WriteTransaction, + start_height: u32, + end_height: u32, + ) -> Result, BlockProcError> { + let mut blocks_to_rollback = Vec::new(); + + // We need to roll back from highest to lowest height + for height in (start_height..=end_height).rev() { + let heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + if let Some(block_id_record) = heights_table + .get(height) + .map_err(|e| BlockProcError::Custom(format!("Heights lookup error: {}", e)))? + { + blocks_to_rollback.push((height, block_id_record.value())); + } + } + + log::debug!( + target: NAME, + "Found {} blocks to roll back from heights {} to {}", + blocks_to_rollback.len(), + start_height, + end_height + ); + + Ok(blocks_to_rollback) + } + + /// Get blocks that need to be applied from the fork chain + /// Returns a list of (height, block_id) pairs, from lowest to highest height + fn get_blocks_to_apply( + &self, + db: &WriteTransaction, + fork_id: ForkId, + start_height: u32, + end_height: u32, + ) -> Result, BlockProcError> { + let mut blocks_to_apply = Vec::new(); + + // Find the blocks in the fork that need to be applied + // This is more complex as fork blocks aren't in the heights table yet + + // Get the tip block ID of the fork + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let fork_info = match forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? + { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Fork {} not found in database", + fork_id + ))); + } + }; + + let (_fork_start_height, fork_tip_id, fork_height) = fork_info; + + // We need to find all blocks from the tip down to the start height + // Since they're not yet in the heights table, we need to traverse backwards + + // Start with the tip block + let mut current_height = fork_height; + let mut current_block_id = fork_tip_id; + + // Collect blocks (from high to low) + let mut temp_blocks = Vec::new(); + + while current_height >= start_height { + temp_blocks.push((current_height, current_block_id)); + + if current_height == start_height { + break; + } + + // Find the parent of this block + let blks_table = db + .open_table(TABLE_BLKS) + .map_err(|e| BlockProcError::Custom(format!("Blocks table error: {}", e)))?; + + let block_header = match blks_table + .get(current_block_id) + .map_err(|e| BlockProcError::Custom(format!("Block lookup error: {}", e)))? + { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Block with ID {} not found in database", + current_block_id + ))); + } + }; + + let prev_hash = block_header.as_ref().prev_block_hash; + + // Find the block ID for this hash + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let prev_block_id = match blockids_table + .get(prev_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? 
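+            // Backward step of the traversal: fork blocks are absent from
+            // TABLE_HEIGHTS, so the parent is resolved through its hash,
+            //     (h, id) -> header.prev_block_hash -> (h - 1, parent id),
+            // and the collected (height, BlockId) pairs are reversed into
+            // ascending order once the loop reaches start_height.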
+ { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Previous block with hash {} not found in database", + prev_hash + ))); + } + }; + + current_block_id = prev_block_id; + current_height -= 1; + } + + // Reverse to get blocks from low to high + blocks_to_apply = temp_blocks.into_iter().rev().collect(); + + log::debug!( + target: NAME, + "Found {} blocks to apply from heights {} to {}", + blocks_to_apply.len(), + start_height, + end_height + ); + + Ok(blocks_to_apply) + } + + /// Roll back blocks from the main chain + fn rollback_blocks( + &self, + db: &WriteTransaction, + blocks: &[(u32, BlockId)], + ) -> Result<(), BlockProcError> { + if blocks.is_empty() { + return Ok(()); + } + + // Iterate through blocks to roll back (should be in descending height order) + for &(height, block_id) in blocks { + log::info!( + target: NAME, + "Rolling back block at height {}: block ID {}", + height, + block_id + ); + + // 1. Restore UTXOs spent in this block + let block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + + if let Some(spends_record) = block_spends_table + .get(block_id) + .map_err(|e| BlockProcError::Custom(format!("Block spends lookup error: {}", e)))? + { + let spends = spends_record.value(); + + // Restore each spent UTXO + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + + for (txno, vout) in spends { + utxos_table.insert((txno, vout), ()).map_err(|e| { + BlockProcError::Custom(format!("UTXO restoration error: {}", e)) + })?; + + log::debug!( + target: NAME, + "Restored UTXO: txno={}, vout={}", + txno, + vout + ); + } + } + + // 2. Find all transactions in this block + let block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + + if let Some(txs_record) = block_txs_table + .get(block_id) + .map_err(|e| BlockProcError::Custom(format!("Block-txs lookup error: {}", e)))? + { + let txs = txs_record.value(); + + // For each transaction + for txno in txs { + // 3. Remove UTXOs created by this transaction + let txes_table = db + .open_table(TABLE_TXES) + .map_err(|e| BlockProcError::Custom(format!("Txes table error: {}", e)))?; + + if let Some(tx_record) = txes_table + .get(txno) + .map_err(|e| BlockProcError::Custom(format!("Tx lookup error: {}", e)))? + { + let tx = tx_record.value(); + let num_outputs = tx.as_ref().outputs.len(); + + let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { + BlockProcError::Custom(format!("UTXOs table error: {}", e)) + })?; + + for vout in 0..num_outputs { + utxos_table.remove(&(txno, vout as u32)).map_err(|e| { + BlockProcError::Custom(format!("UTXO removal error: {}", e)) + })?; + + log::debug!( + target: NAME, + "Removed UTXO: txno={}, vout={}", + txno, + vout + ); + } + } + } + } + + // 4. 
Remove this block from the heights tables + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + heights_table + .remove(height) + .map_err(|e| BlockProcError::Custom(format!("Heights removal error: {}", e)))?; + + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + + block_heights_table.remove(block_id).map_err(|e| { + BlockProcError::Custom(format!("Block height removal error: {}", e)) + })?; + + log::debug!( + target: NAME, + "Removed block height mapping for height {} and block ID {}", + height, + block_id + ); + } + + log::info!( + target: NAME, + "Successfully rolled back {} blocks", + blocks.len() + ); + + Ok(()) + } + + /// Apply blocks from the fork chain to make it the new main chain + fn apply_blocks( + &self, + db: &WriteTransaction, + blocks: &[(u32, BlockId)], + ) -> Result<(), BlockProcError> { + if blocks.is_empty() { + return Ok(()); + } + + // Iterate through blocks to apply (should be in ascending height order) + for &(height, block_id) in blocks { + log::info!( + target: NAME, + "Applying block at height {}: block ID {}", + height, + block_id + ); + + // Update the heights tables + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + heights_table + .insert(height, block_id) + .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; + + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + + block_heights_table.insert(block_id, height).map_err(|e| { + BlockProcError::Custom(format!("Block height storage error: {}", e)) + })?; + + log::debug!( + target: NAME, + "Updated block height mapping for height {} and block ID {}", + height, + block_id + ); + } + + log::info!( + target: NAME, + "Successfully applied {} blocks", + blocks.len() + ); + + Ok(()) + } + + /// Clean up fork information after a successful reorganization + fn cleanup_after_reorg( + &self, + db: &WriteTransaction, + applied_fork_id: ForkId, + ) -> Result<(), BlockProcError> { + // Get information about the applied fork + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let fork_info = match forks_table + .get(applied_fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? 
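+        // Cleanup policy implemented below: any other fork that both started
+        // below the adopted fork's final height and never outgrew it is dropped
+        // together with its TABLE_FORK_TIPS entry; the adopted fork's own record
+        // is removed last, since its blocks now live in the height tables.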
+ { + Some(record) => record.value(), + None => { + // Fork already removed, nothing to do + return Ok(()); + } + }; + + let (_start_height, _tip_id, fork_height) = fork_info; + + // Remove old forks that are now definitely invalid + // Any fork that starts at a height less than the applied fork's height + // and has not become the main chain by now should be removed + + let iter = forks_table + .iter() + .map_err(|e| BlockProcError::Custom(format!("Forks iterator error: {}", e)))?; + + let mut forks_to_remove = Vec::new(); + + for entry in iter { + let (fork_id, info) = + entry.map_err(|e| BlockProcError::Custom(format!("Fork entry error: {}", e)))?; + + let fork_id_value = fork_id.value(); + + // Skip the fork that was just applied + if fork_id_value == applied_fork_id { + continue; + } + + let (start_height, _tip_id, current_height) = info.value(); + + // If this fork is left behind the main chain, remove it + if start_height < fork_height && current_height <= fork_height { + forks_to_remove.push(fork_id_value); + } + } + + // Now remove the outdated forks + let mut forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + for fork_id in &forks_to_remove { + // Find and remove the tip hash for this fork + if let Some(tip_hash) = self.find_fork_tip_hash(db, *fork_id)? { + let mut fork_tips_table = db + .open_table(TABLE_FORK_TIPS) + .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; + + fork_tips_table + .remove(tip_hash.to_byte_array()) + .map_err(|e| { + BlockProcError::Custom(format!("Fork tip removal error: {}", e)) + })?; + } + + // Remove the fork entry + forks_table + .remove(*fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork removal error: {}", e)))?; + + log::info!( + target: NAME, + "Removed obsolete fork {} after reorganization", + fork_id + ); + } + + // Finally, remove the applied fork as well + // Remove the tip hash mapping + if let Some(tip_hash) = self.find_fork_tip_hash(db, applied_fork_id)? 
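+        // Handle-discipline note (assumption about redb's API: opening a table a
+        // second time within one WriteTransaction while the first handle is still
+        // alive reports TableAlreadyOpen): find_fork_tip_hash opens TABLE_FORKS
+        // internally, so a caller should release its own handle first,
+        // schematically:
+        //     drop(forks_table);
+        //     let tip = self.find_fork_tip_hash(db, fork_id)?;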
+        {
+            let mut fork_tips_table = db
+                .open_table(TABLE_FORK_TIPS)
+                .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?;
+
+            fork_tips_table
+                .remove(tip_hash.to_byte_array())
+                .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?;
+        }
+
+        // Remove the fork entry
+        forks_table
+            .remove(applied_fork_id)
+            .map_err(|e| BlockProcError::Custom(format!("Fork removal error: {}", e)))?;
+
+        log::info!(
+            target: NAME,
+            "Removed applied fork {} after successful reorganization",
+            applied_fork_id
+        );
+
+        Ok(())
+    }
 }
 
 #[derive(Debug, Display, Error, From)]
@@ -1084,6 +1961,9 @@ pub enum BlockProcError {
     /// Unable to open blocks table: {0}
     BlockTable(TableError),
 
+    /// Unable to open heights table: {0}
+    HeightsTable(TableError),
+
     /// Unable to write to blocks table: {0}
     BlockStorage(StorageError),
 
@@ -1108,6 +1988,9 @@ pub enum BlockProcError {
     /// Orphan block detected: parent block {0} not found
     OrphanBlock(BlockHash),
 
+    /// Potential fork detected: new block {0} at height {1} conflicts with existing block {2}
+    PotentialFork(BlockHash, u32, BlockId),
+
     /// Custom error: {0}
     Custom(String),
 }
diff --git a/src/db.rs b/src/db.rs
index 90c0d73..47c4f4c 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -41,7 +41,10 @@ pub struct TxNo(u40);
 
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Display)]
 #[display("#{0:08X}")]
-pub struct BlockId(u32);
+pub struct Id(u32);
+
+pub type BlockId = Id;
+pub type ForkId = Id;
 
 impl TxNo {
     pub fn start() -> Self { TxNo(u40::ZERO) }
@@ -49,9 +52,8 @@ impl TxNo {
     pub fn inc_assign(&mut self) { self.0 += u40::ONE }
 }
 
-impl BlockId {
-    // 0 corresponds to the genesis block, and the height is aligned with other indexers
-    pub fn start() -> Self { BlockId(0) }
+impl Id {
+    pub fn start() -> Self { Id(0) }
 
     pub fn inc_assign(&mut self) { self.0 += 1 }
 
@@ -61,12 +63,12 @@ impl BlockId {
     // Method to get bytes representation
     pub fn to_bytes(&self) -> [u8; 4] { self.0.to_be_bytes() }
 
-    // Method to create BlockId from bytes
+    // Method to create Id from bytes
     pub fn from_bytes(bytes: &[u8]) -> Self {
         debug_assert_eq!(bytes.len(), 4);
         let mut array = [0u8; 4];
         array.copy_from_slice(bytes);
-        BlockId(u32::from_be_bytes(array))
+        Id(u32::from_be_bytes(array))
     }
 }
 
@@ -181,11 +183,11 @@ impl redb::Value for DbTx {
     fn type_name() -> TypeName { TypeName::new("BpNodeTx") }
 }
 
-impl redb::Key for BlockId {
+impl redb::Key for Id {
     fn compare(data1: &[u8], data2: &[u8]) -> Ordering { data1.cmp(data2) }
 }
 
-impl redb::Value for BlockId {
+impl redb::Value for Id {
     type SelfType<'a> = Self;
     type AsBytes<'a> = [u8; 4];
 
@@ -194,7 +196,7 @@ impl redb::Value for BlockId {
 
     fn from_bytes<'a>(data: &'a [u8]) -> Self::SelfType<'a>
     where Self: 'a {
-        BlockId::from_bytes(data)
+        Id::from_bytes(data)
     }
 
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a>
@@ -209,6 +211,10 @@ pub const REC_TXNO: &str = "txno";
 pub const REC_BLOCKID: &str = "blockid";
 pub const REC_CHAIN: &str = "chain";
 pub const REC_ORPHANS: &str = "orphans";
+// Network information record in main table
+pub const REC_NETWORK: &str = "network";
+// Constants for fork management
+pub const REC_FORK_ID: &str = "forkid";
 
 // Main metadata table storing global counters and states
 pub const TABLE_MAIN: TableDefinition<&'static str, &[u8]> = TableDefinition::new("main");
@@ -250,20 +256,29 @@ pub const TABLE_BLOCK_TXS: TableDefinition<BlockId, Vec<TxNo>> = TableDefinition
 
 // Maps transaction input to the output it spends
 pub const TABLE_INPUTS: TableDefinition<(TxNo, u32), (TxNo, u32)> = TableDefinition::new("inputs");
 
-// Records UTXOs spent in each block for reorg handling
+// Records all UTXOs spent in each block for potential rollback
 pub const TABLE_BLOCK_SPENDS: TableDefinition<BlockId, Vec<(TxNo, u32)>> =
     TableDefinition::new("block_spends");
 
-// Stores orphan blocks (blocks whose parent block is not yet processed)
-// Maps block hash to serialized block data
+// Stores orphan blocks (blocks received without their parent blocks)
+// Maps block hash to (block data, timestamp)
+// Note: Orphan blocks are not assigned BlockId values because:
+// 1. They are in a temporary state and may never become part of the main chain
+// 2. Many orphans may eventually be discarded when their ancestry is resolved
+// 3. BlockId resources are preserved for blocks that are (or may become) part of the chain
 pub const TABLE_ORPHANS: TableDefinition<[u8; 32], (DbBlock, u64)> =
     TableDefinition::new("orphans");
 
-// Maps orphan block's parent hash to orphan block hash
-// This allows quick lookup of orphan blocks when their parent is processed
+// Maps parent block hash to list of orphan blocks that depend on it
 pub const TABLE_ORPHAN_PARENTS: TableDefinition<[u8; 32], Vec<[u8; 32]>> =
     TableDefinition::new("orphan_parents");
 
+// Tracks blockchain forks - maps fork ID to (fork_start_height, tip_block_id, current_height)
+pub const TABLE_FORKS: TableDefinition<ForkId, (u32, BlockId, u32)> = TableDefinition::new("forks");
+
+// Maps fork tip hash to fork ID for quick lookup
+pub const TABLE_FORK_TIPS: TableDefinition<[u8; 32], ForkId> = TableDefinition::new("fork_tips");
+
 // Each BP-Node instance is designed to work with a single blockchain network.
 // If multiple networks need to be indexed, separate instances should be used
 // with different data directories. The network information is stored in the
diff --git a/src/lib.rs b/src/lib.rs
index 2a17651..9af7b63 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -38,9 +38,10 @@ mod blocks;
 pub mod db;
 mod importer;
 
-pub use blocks::{BlockProcError, BlockProcessor, REC_NETWORK};
+pub use blocks::{BlockProcError, BlockProcessor};
 pub use broker::{Broker, BrokerError, BrokerRpcMsg, PATH_INDEXDB, TrackReq};
 pub use config::Config;
+pub use db::REC_NETWORK;
 pub use importer::{BlockImporter, ImporterCmd, ImporterMsg};
 pub use rpc::{RpcCmd, RpcController};
 //pub use query::{QueryWorker, QueryReq, QueryResp};

From 14de04734abdf052d508afa66f406b40ba862ebf Mon Sep 17 00:00:00 2001
From: will-bitlightlabs
Date: Thu, 17 Apr 2025 00:18:37 +0800
Subject: [PATCH 08/21] refactor: optimize fork handling and resolve FIXME
 items in BlockProcessor

Signed-off-by: will-bitlightlabs

---
 src/blocks.rs | 563 ++++++++++++++++++++++++++++++++++++--------------
 src/db.rs     |  18 +-
 2 files changed, 421 insertions(+), 160 deletions(-)

diff --git a/src/blocks.rs b/src/blocks.rs
index ef19083..2086317 100644
--- a/src/blocks.rs
+++ b/src/blocks.rs
@@ -39,9 +39,9 @@ use crate::ImporterMsg;
 use crate::db::{
     BlockId, DbBlock, DbBlockHeader, DbMsg, DbTx, ForkId, REC_BLOCKID, REC_FORK_ID, REC_TXNO,
     TABLE_BLKS, TABLE_BLOCK_HEIGHTS, TABLE_BLOCK_SPENDS, TABLE_BLOCK_TXS, TABLE_BLOCKIDS,
-    TABLE_FORK_TIPS, TABLE_FORKS, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN, TABLE_ORPHAN_PARENTS,
-    TABLE_ORPHANS, TABLE_OUTS, TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES, TABLE_TXIDS, TABLE_UTXOS,
-    TxNo,
+    TABLE_FORK_BLOCKS, TABLE_FORK_TIPS, TABLE_FORKS, TABLE_HEIGHTS, TABLE_INPUTS, TABLE_MAIN,
+    TABLE_ORPHAN_PARENTS, TABLE_ORPHANS, TABLE_OUTS, TABLE_SPKS, TABLE_TX_BLOCKS, TABLE_TXES,
+    TABLE_TXIDS, TABLE_UTXOS, TxNo,
 };
 
 const NAME: &str =
"blockproc"; @@ -69,7 +69,11 @@ impl BlockProcessor { } // Helper function to calculate block height based on previous block hash - fn calculate_block_height(&self, block: &Block) -> Result { + fn calculate_block_height( + &self, + block: &Block, + db: &WriteTransaction, + ) -> Result { // For genesis block, height is always 0 // Check for all zeros hash which is the genesis block's prev_hash let zero_hash = [0u8; 32]; @@ -78,10 +82,6 @@ impl BlockProcessor { } // Find block height of the previous block and add 1 - let (tx, rx) = crossbeam_channel::bounded(1); - self.db.send(DbMsg::Write(tx))?; - let db = rx.recv()?; - // Lookup the block ID for the previous block hash let blockids_table = db .open_table(TABLE_BLOCKIDS) @@ -101,9 +101,8 @@ impl BlockProcessor { return Err(BlockProcError::OrphanBlock(block.header.prev_block_hash)); } - let prev_blockid_record = prev_blockid.unwrap(); // Get the previous block's ID - let prev_blockid = prev_blockid_record.value(); + let prev_blockid = prev_blockid.unwrap().value(); // First check the BlockId to height mapping table which is more efficient let block_heights_table = db @@ -118,14 +117,37 @@ impl BlockProcessor { prev_height + 1 }) .ok_or_else(|| { - BlockProcError::Custom(format!( - "Database inconsistency: Previous block with ID {} found in blockids table \ - but not in any height table", + // Parent block has blockid but no height record - this indicates a potential fork + // This typically happens when the parent block is part of a fork chain + let block_hash = block.block_hash(); + let parent_hash = block.header.prev_block_hash; + // Check if parent block is part of a known fork + if let Some(fork_id) = match self.find_fork_by_block_id(db, prev_blockid){ + Ok(Some(id)) => Some(id), + Ok(None) => None, + Err(e) => return e, + } { + // Found the fork - this block extends a fork chain + log::info!( + target: NAME, + "Block {} has parent {} which is part of fork {}", + block_hash, + parent_hash, + fork_id + ); + // Return specialized error for fork chain extension + return BlockProcError::ForkChainExtension(block_hash, parent_hash); + } + // If not part of a fork, it's likely a database inconsistency + log::warn!( + target: NAME, + "Database inconsistency: Block {} has parent {} with ID {} but no height record", + block_hash, + parent_hash, prev_blockid - )) + ); + BlockProcError::DatabaseInconsistency(block_hash, parent_hash, prev_blockid) })?; - - // Store block height information let heights_table = db .open_table(TABLE_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; @@ -179,7 +201,7 @@ impl BlockProcessor { let process = || -> Result<(), BlockProcError> { // Calculate the block height based on previous block // This function will also detect orphan blocks and potential forks - let height = self.calculate_block_height(&block)?; + let height = self.calculate_block_height(&block, &db)?; let blockid = self.get_next_block_id(&db)?; @@ -431,7 +453,7 @@ impl BlockProcessor { return self.save_orphan_block(id, block_clone); } Err(BlockProcError::PotentialFork(new_block_hash, height, existing_blockid)) => { - // Handle potential fork case + // Handle potential fork case - conflict with existing block at same height if let Err(err) = db.abort() { log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); }; @@ -439,8 +461,12 @@ impl BlockProcessor { // Record this as a potential fork for later verification // Store the new block but don't update the height tables yet // We'll only 
perform a reorganization if this fork becomes the longest chain - // TODO: store the new block in the database - self.process_potential_fork(id, &block_clone, height, existing_blockid)?; + self.process_potential_fork( + id, + &block_clone, + Some(height), + Some(existing_blockid), + )?; return Err(BlockProcError::PotentialFork( new_block_hash, @@ -448,6 +474,23 @@ impl BlockProcessor { existing_blockid, )); } + Err(BlockProcError::ForkChainExtension(block_hash, parent_hash)) => { + // Handle fork chain extension case - parent block is part of a fork + if let Err(err) = db.abort() { + log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); + }; + + log::info!( + target: NAME, + "Processing block {} as fork chain extension with parent {}", + block_hash, + parent_hash + ); + + self.process_potential_fork(id, &block_clone, None, None)?; + + return Ok(0); + } Err(e) => { // Handle other errors if let Err(err) = db.abort() { @@ -1010,69 +1053,89 @@ impl BlockProcessor { &mut self, block_hash: BlockHash, block: &Block, - height: u32, - existing_blockid: BlockId, + height: Option, + existing_blockid: Option, ) -> Result<(), BlockProcError> { let (tx, rx) = crossbeam_channel::bounded(1); self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; + // Get a new block ID for this fork block let new_blockid = self.get_next_block_id(&db)?; - // First, check if this block is extending an existing fork + + { + // Store the block header + let mut blocks_table = db + .open_table(TABLE_BLKS) + .map_err(BlockProcError::BlockTable)?; + blocks_table + .insert(new_blockid, DbBlockHeader::from(block.header)) + .map_err(BlockProcError::BlockStorage)?; + + // Store the complete block data in the fork blocks table + let mut fork_blocks_table = db + .open_table(TABLE_FORK_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Fork blocks table error: {}", e)))?; + fork_blocks_table + .insert(new_blockid, DbBlock::from(block.clone())) + .map_err(|e| BlockProcError::Custom(format!("Fork block storage error: {}", e)))?; + + // Map block hash to the assigned block ID + let mut blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + blockids_table + .insert(block_hash.to_byte_array(), new_blockid) + .map_err(|e| BlockProcError::Custom(format!("Block ID storage error: {}", e)))?; + } + + // First step: Check if this block extends an existing fork + // Find the parent block ID + let parent_block_id = self + .find_block_id_by_hash(&db, block.header.prev_block_hash)? + .ok_or(BlockProcError::Custom(format!( + "Parent block not found: {}", + block.header.prev_block_hash + )))?; let fork_id = if let Some(parent_fork_id) = - self.find_fork_by_block_hash(&db, block.header.prev_block_hash)? + self.find_fork_by_block_id(&db, parent_block_id)? { // This block extends an existing fork log::info!( target: NAME, - "Block {} at height {} extends existing fork {}", + "Block {} extends existing fork {}", block_hash, - height, parent_fork_id ); // Update the fork with this new block - self.update_fork(&db, parent_fork_id, height, new_blockid, block_hash)?; + self.update_fork(&db, parent_fork_id, new_blockid)?; parent_fork_id } else { // This block might start a new fork // First check if its parent is in the main chain - if self.is_block_in_main_chain(&db, block.header.prev_block_hash)? { - // Create a new fork - let new_fork_id = match self.record_fork( - &db, - height, - existing_blockid, - new_blockid, - block_hash, - )? 
{ - Some(id) => id, - None => { - // This shouldn't happen, but handle it gracefully - log::warn!( - target: NAME, - "Failed to create new fork for block {} at height {}", - block_hash, - height - ); - return Ok(()); - } - }; - - new_fork_id - } else { - // Parent is not in main chain and not in a known fork - // This could be an orphan or invalid block + if !self.is_block_in_main_chain(&db, block.header.prev_block_hash)? { + // Parent block is not in main chain and not in a known fork log::warn!( target: NAME, - "Block {} at height {} is disconnected: parent {} not found in main chain or forks", + "Block {} is disconnected: parent {} not found in main chain or forks", block_hash, - height, block.header.prev_block_hash ); return Ok(()); } + + self.record_fork( + &db, + height + .ok_or(BlockProcError::Custom("Height is required for new fork".to_string()))?, + existing_blockid.ok_or(BlockProcError::Custom( + "Existing block ID is required for new fork".to_string(), + ))?, + new_blockid, + block_hash, + )? }; // Check if this fork is now longer than the main chain @@ -1107,7 +1170,7 @@ impl BlockProcessor { } }; - let (_fork_start_height, _fork_tip_id, fork_height) = fork_info; + let (_fork_start_height, _fork_start_block_id, _fork_tip_id, fork_height) = fork_info; // Get main chain height let main_chain_height = self.get_main_chain_height(db)?; @@ -1161,7 +1224,7 @@ impl BlockProcessor { } }; - let (fork_start_height, fork_tip_id, fork_height) = fork_info; + let (fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = fork_info; log::info!( target: NAME, @@ -1172,7 +1235,7 @@ impl BlockProcessor { fork_tip_id ); - // 1. Find the common ancestor (could be the fork_start_height - 1) + // 1. Find the common ancestor let common_ancestor_height = fork_start_height; // 2. Get blocks to rollback from main chain @@ -1218,9 +1281,9 @@ impl BlockProcessor { existing_blockid: BlockId, new_blockid: BlockId, new_block_hash: BlockHash, - ) -> Result, BlockProcError> { + ) -> Result { // Check if this block is already part of a known fork - if let Some(fork_id) = self.find_fork_by_block_hash(db, new_block_hash)? { + if let Some(fork_id) = self.find_fork_by_block_id(db, new_blockid)? 
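// If the new block's ID is already registered as a fork tip, reuse that fork instead of creating a duplicate entry.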
{
             log::debug!(
                 target: NAME,
                 "Block {} at height {} is already part of fork {}",
@@ -1228,7 +1291,7 @@ impl BlockProcessor {
                 height,
                 fork_id
             );
-            return Ok(None);
+            return Ok(fork_id);
         }
 
         // Generate a new fork ID
@@ -1239,18 +1302,19 @@ impl BlockProcessor {
             .open_table(TABLE_FORKS)
             .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?;
 
-        // A fork starts at the current height
+        // A fork starts at the current height with the current block
+        // Parameters: (fork_start_height, fork_start_block_id, tip_block_id, current_height)
         forks_table
-            .insert(fork_id, (height, new_blockid, height))
+            .insert(fork_id, (height, existing_blockid, new_blockid, height))
             .map_err(|e| BlockProcError::Custom(format!("Fork insertion error: {}", e)))?;
 
-        // Map the fork tip hash to the fork ID
+        // Map the fork tip block ID to the fork ID
         let mut fork_tips_table = db
             .open_table(TABLE_FORK_TIPS)
             .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?;
 
         fork_tips_table
-            .insert(new_block_hash.to_byte_array(), fork_id)
+            .insert(new_blockid, fork_id)
             .map_err(|e| BlockProcError::Custom(format!("Fork tip mapping error: {}", e)))?;
 
         log::info!(
@@ -1262,7 +1326,7 @@ impl BlockProcessor {
             new_blockid
         );
 
-        Ok(Some(fork_id))
+        Ok(fork_id)
     }
 
     /// Gets the next available block ID and increments the counter
@@ -1307,18 +1371,33 @@ impl BlockProcessor {
         Ok(fork_id)
     }
 
-    /// Find fork ID by block hash
-    fn find_fork_by_block_hash(
+    /// Find block ID by block hash
+    fn find_block_id_by_hash(
         &self,
         db: &WriteTransaction,
         block_hash: BlockHash,
+    ) -> Result<Option<BlockId>, BlockProcError> {
+        let blockids_table = db
+            .open_table(TABLE_BLOCKIDS)
+            .map_err(BlockProcError::BlockTable)?;
+        let block_id = blockids_table
+            .get(block_hash.to_byte_array())
+            .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))?;
+        if let Some(record) = block_id { Ok(Some(record.value())) } else { Ok(None) }
+    }
+
+    /// Find fork ID by fork tip block ID
+    fn find_fork_by_block_id(
+        &self,
+        db: &WriteTransaction,
+        block_id: BlockId,
     ) -> Result<Option<ForkId>, BlockProcError> {
         let fork_tips_table = db
             .open_table(TABLE_FORK_TIPS)
             .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?;
 
         if let Some(fork_id_record) = fork_tips_table
-            .get(block_hash.to_byte_array())
+            .get(block_id)
             .map_err(|e| BlockProcError::Custom(format!("Fork tip lookup error: {}", e)))?
         {
             return Ok(Some(fork_id_record.value()));
         }
@@ -1332,9 +1411,7 @@ impl BlockProcessor {
         &self,
         db: &WriteTransaction,
         fork_id: ForkId,
-        new_height: u32,
         new_block_id: BlockId,
-        new_block_hash: BlockHash,
     ) -> Result<(), BlockProcError> {
         // Update the fork record
         let mut forks_table = db
@@ -1355,11 +1432,12 @@ impl BlockProcessor {
             }
         };
 
-        let (start_height, _old_tip_id, _old_height) = fork_info;
+        let (start_height, start_block_id, old_tip_id, current_height) = fork_info;
+        let new_height = current_height + 1;
 
         // Update fork with new tip and height
         forks_table
-            .insert(fork_id, (start_height, new_block_id, new_height))
+            .insert(fork_id, (start_height, start_block_id, new_block_id, new_height))
             .map_err(|e| BlockProcError::Custom(format!("Fork update error: {}", e)))?;
 
         // Update the fork tip mapping
         let mut fork_tips_table = db
             .open_table(TABLE_FORK_TIPS)
             .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?;
 
-        // Remove old tip mapping if it exists (need to look it up first)
-        if let Some(old_tip_hash) = self.find_fork_tip_hash(db, fork_id)?
{ - fork_tips_table - .remove(old_tip_hash.to_byte_array()) - .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?; - } + // Remove old tip mapping if it exists + fork_tips_table + .remove(old_tip_id) + .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?; // Add new tip mapping fork_tips_table - .insert(new_block_hash.to_byte_array(), fork_id) + .insert(new_block_id, fork_id) .map_err(|e| BlockProcError::Custom(format!("Fork tip mapping error: {}", e)))?; log::debug!( @@ -1390,49 +1466,6 @@ impl BlockProcessor { Ok(()) } - /// Find the hash of a fork's tip block - fn find_fork_tip_hash( - &self, - db: &WriteTransaction, - fork_id: ForkId, - ) -> Result, BlockProcError> { - // Get fork info - let forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - - if let Some(fork_record) = forks_table - .get(fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? - { - let (_start_height, tip_id, _height) = fork_record.value(); - - // Find the block hash for this block ID - // This requires iterating through the blockids table - let blockids_table = db - .open_table(TABLE_BLOCKIDS) - .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; - - let iter = blockids_table - .iter() - .map_err(|e| BlockProcError::Custom(format!("Block IDs iterator error: {}", e)))?; - - for entry in iter { - let (hash, id) = entry - .map_err(|e| BlockProcError::Custom(format!("Block ID entry error: {}", e)))?; - - if id.value() == tip_id { - // Found the block hash - let mut hash_array = [0u8; 32]; - hash_array.copy_from_slice(hash.value().as_slice()); - return Ok(Some(BlockHash::from_byte_array(hash_array))); - } - } - } - - Ok(None) - } - /// Get the current height of the main chain fn get_main_chain_height(&self, db: &WriteTransaction) -> Result { // Find the maximum height in the heights table @@ -1537,8 +1570,6 @@ impl BlockProcessor { start_height: u32, end_height: u32, ) -> Result, BlockProcError> { - let mut blocks_to_apply = Vec::new(); - // Find the blocks in the fork that need to be applied // This is more complex as fork blocks aren't in the heights table yet @@ -1560,7 +1591,7 @@ impl BlockProcessor { } }; - let (_fork_start_height, fork_tip_id, fork_height) = fork_info; + let (_fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = fork_info; // We need to find all blocks from the tip down to the start height // Since they're not yet in the heights table, we need to traverse backwards @@ -1622,7 +1653,7 @@ impl BlockProcessor { } // Reverse to get blocks from low to high - blocks_to_apply = temp_blocks.into_iter().rev().collect(); + let blocks_to_apply: Vec<(u32, crate::db::Id)> = temp_blocks.into_iter().rev().collect(); log::debug!( target: NAME, @@ -1764,6 +1795,8 @@ impl BlockProcessor { } /// Apply blocks from the fork chain to make it the new main chain + /// This method processes all transactions in the fork blocks to ensure + /// the UTXO set and other indexes are properly updated fn apply_blocks( &self, db: &WriteTransaction, @@ -1773,6 +1806,18 @@ impl BlockProcessor { return Ok(()); } + // Get current transaction number - we'll need this for processing new transactions + let mut txno = { + let main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + let rec = main + .get(REC_TXNO) + .map_err(BlockProcError::TxNoAbsent)? 
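// Assumes REC_TXNO has already been written by earlier block processing; patch 09 below replaces this unwrap with a fallback to TxNo::start() for freshly initialized databases.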
+ .unwrap(); + TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)? + }; + // Iterate through blocks to apply (should be in ascending height order) for &(height, block_id) in blocks { log::info!( @@ -1782,6 +1827,203 @@ impl BlockProcessor { block_id ); + // Get the complete block data from fork blocks table + let fork_blocks_table = db + .open_table(TABLE_FORK_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Fork blocks table error: {}", e)))?; + + let block_data = fork_blocks_table + .get(block_id) + .map_err(|e| BlockProcError::Custom(format!("Fork block lookup error: {}", e)))? + .ok_or_else(|| { + BlockProcError::Custom(format!("Fork block {} not found in database", block_id)) + })? + .value(); + + let block = block_data.as_ref(); + log::debug!( + target: NAME, + "Processing {} transactions from fork block {}", + block.transactions.len(), + block_id + ); + + // Track UTXOs spent in this block + let mut block_spends = Vec::new(); + + // Track all transactions in this block + let mut block_txs = Vec::new(); + + // Process all transactions in the block + for tx in &block.transactions { + let txid = tx.txid(); + + // For fork blocks, txids may already be in the database with assigned txno + // Check if this txid already exists + let txids_table = db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?; + + let existing_txno = txids_table + .get(txid.to_byte_array()) + .map_err(BlockProcError::TxidLookup)? + .map(|v| v.value()); + + let tx_txno = if let Some(existing) = existing_txno { + // Use the existing transaction number + existing + } else { + // Assign a new transaction number + txno.inc_assign(); + txno + }; + + // Add transaction to the list for this block + block_txs.push(tx_txno); + + // If this is a new transaction, store its mapping and data + if existing_txno.is_none() { + // Store transaction ID to transaction number mapping + let mut txids_table = db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?; + txids_table + .insert(txid.to_byte_array(), tx_txno) + .map_err(BlockProcError::TxidStorage)?; + + // Store the transaction data + let mut txes_table = db + .open_table(TABLE_TXES) + .map_err(BlockProcError::TxesTable)?; + txes_table + .insert(tx_txno, DbTx::from(tx.clone())) + .map_err(BlockProcError::TxesStorage)?; + } + + // Associate transaction with block ID (update even if transaction existed) + let mut tx_blocks_table = db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; + tx_blocks_table.insert(tx_txno, block_id).map_err(|e| { + BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) + })?; + + // Process transaction inputs + for (vin_idx, input) in tx.inputs.iter().enumerate() { + if !input.prev_output.is_coinbase() { + let prev_txid = input.prev_output.txid; + let prev_vout = input.prev_output.vout; + + // Look up previous transaction number + if let Some(prev_txno) = txids_table + .get(prev_txid.to_byte_array()) + .map_err(BlockProcError::TxidLookup)? 
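// Only previously indexed transactions update the spend tables here; inputs whose prevout transaction is unknown are skipped.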
+ .map(|v| v.value()) + { + // Mark UTXO as spent + let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { + BlockProcError::Custom(format!("UTXOs table error: {}", e)) + })?; + utxos_table + .remove(&(prev_txno, prev_vout.into_u32())) + .map_err(|e| { + BlockProcError::Custom(format!("UTXOs removal error: {}", e)) + })?; + + // Record UTXO spent in this block + block_spends.push((prev_txno, prev_vout.into_u32())); + + // Record input-output mapping + let mut inputs_table = db.open_table(TABLE_INPUTS).map_err(|e| { + BlockProcError::Custom(format!("Inputs table error: {}", e)) + })?; + inputs_table + .insert( + (tx_txno, vin_idx as u32), + (prev_txno, prev_vout.into_u32()), + ) + .map_err(|e| { + BlockProcError::Custom(format!("Inputs storage error: {}", e)) + })?; + + // Update spending relationships + let mut outs_table = db.open_table(TABLE_OUTS).map_err(|e| { + BlockProcError::Custom(format!("Outs table error: {}", e)) + })?; + let mut spending_txs = outs_table + .get(prev_txno) + .map_err(|e| { + BlockProcError::Custom(format!("Outs lookup error: {}", e)) + })? + .map(|v| v.value().to_vec()) + .unwrap_or_default(); + + // Avoid duplicate entries + if !spending_txs.contains(&tx_txno) { + spending_txs.push(tx_txno); + outs_table.insert(prev_txno, spending_txs).map_err(|e| { + BlockProcError::Custom(format!("Outs update error: {}", e)) + })?; + } + } + } + } + + // Process transaction outputs + for (vout_idx, output) in tx.outputs.iter().enumerate() { + // Add new UTXO + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + utxos_table + .insert((tx_txno, vout_idx as u32), ()) + .map_err(|e| { + BlockProcError::Custom(format!("UTXOs storage error: {}", e)) + })?; + + // Index script pubkey + let script = &output.script_pubkey; + if !script.is_empty() { + let mut spks_table = db.open_table(TABLE_SPKS).map_err(|e| { + BlockProcError::Custom(format!("SPKs table error: {}", e)) + })?; + let mut txnos = spks_table + .get(script.as_slice()) + .map_err(|e| { + BlockProcError::Custom(format!("SPKs lookup error: {}", e)) + })? 
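// Start from the existing txno list for this script, or an empty list if the script has not been indexed yet.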
+ .map(|v| v.value().to_vec()) + .unwrap_or_default(); + + // Avoid duplicate entries + if !txnos.contains(&tx_txno) { + txnos.push(tx_txno); + spks_table.insert(script.as_slice(), txnos).map_err(|e| { + BlockProcError::Custom(format!("SPKs update error: {}", e)) + })?; + } + } + } + } + + // Store all transaction numbers in this block + let mut block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + block_txs_table + .insert(block_id, block_txs) + .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; + + // Store UTXOs spent in this block + let mut block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + block_spends_table + .insert(block_id, block_spends) + .map_err(|e| { + BlockProcError::Custom(format!("Block spends storage error: {}", e)) + })?; + // Update the heights tables let mut heights_table = db .open_table(TABLE_HEIGHTS) @@ -1807,9 +2049,16 @@ impl BlockProcessor { ); } + // Update the global transaction counter + let mut main = db + .open_table(TABLE_MAIN) + .map_err(BlockProcError::MainTable)?; + main.insert(REC_TXNO, txno.to_byte_array().as_slice()) + .map_err(BlockProcError::TxNoUpdate)?; + log::info!( target: NAME, - "Successfully applied {} blocks", + "Successfully applied {} blocks with all transactions", blocks.len() ); @@ -1838,12 +2087,11 @@ impl BlockProcessor { } }; - let (_start_height, _tip_id, fork_height) = fork_info; + let (_start_height, _start_block_id, _tip_id, fork_height) = fork_info; // Remove old forks that are now definitely invalid // Any fork that starts at a height less than the applied fork's height // and has not become the main chain by now should be removed - let iter = forks_table .iter() .map_err(|e| BlockProcError::Custom(format!("Forks iterator error: {}", e)))?; @@ -1861,11 +2109,11 @@ impl BlockProcessor { continue; } - let (start_height, _tip_id, current_height) = info.value(); + let (start_height, _start_block_id, tip_id, current_height) = info.value(); // If this fork is left behind the main chain, remove it if start_height < fork_height && current_height <= fork_height { - forks_to_remove.push(fork_id_value); + forks_to_remove.push((fork_id_value, tip_id)); } } @@ -1874,19 +2122,15 @@ impl BlockProcessor { .open_table(TABLE_FORKS) .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - for fork_id in &forks_to_remove { - // Find and remove the tip hash for this fork - if let Some(tip_hash) = self.find_fork_tip_hash(db, *fork_id)? { - let mut fork_tips_table = db - .open_table(TABLE_FORK_TIPS) - .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; - - fork_tips_table - .remove(tip_hash.to_byte_array()) - .map_err(|e| { - BlockProcError::Custom(format!("Fork tip removal error: {}", e)) - })?; - } + let mut fork_tips_table = db + .open_table(TABLE_FORK_TIPS) + .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; + + for (fork_id, tip_id) in &forks_to_remove { + // Remove the tip mapping + fork_tips_table + .remove(*tip_id) + .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?; // Remove the fork entry forks_table @@ -1900,17 +2144,20 @@ impl BlockProcessor { ); } - // Finally, remove the applied fork as well - // Remove the tip hash mapping - if let Some(tip_hash) = self.find_fork_tip_hash(db, applied_fork_id)? 
{
-            let mut fork_tips_table = db
-                .open_table(TABLE_FORK_TIPS)
-                .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?;
+        let (_start_height, _start_block_id, tip_id, _current_height) = {
+            // Finally, remove the applied fork as well
+            // Get the tip ID for the applied fork
+            let fork_info = forks_table
+                .get(applied_fork_id)
+                .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))?
+                .expect("Applied fork should exist");
+            fork_info.value()
+        };
 
-            fork_tips_table
-                .remove(tip_hash.to_byte_array())
-                .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?;
-        }
+        // Remove the tip mapping
+        fork_tips_table
+            .remove(tip_id)
+            .map_err(|e| BlockProcError::Custom(format!("Fork tip removal error: {}", e)))?;
 
         // Remove the fork entry
         forks_table
@@ -1991,6 +2238,12 @@ pub enum BlockProcError {
     /// Potential fork detected: new block {0} at height {1} conflicts with existing block {2}
     PotentialFork(BlockHash, u32, BlockId),
 
+    /// Fork chain extension: new block {0} extends fork chain with parent block {1}
+    ForkChainExtension(BlockHash, BlockHash),
+
+    /// Database inconsistency: block {0} has parent {1} with ID {2} but missing height
+    DatabaseInconsistency(BlockHash, BlockHash, BlockId),
+
     /// Custom error: {0}
     Custom(String),
 }
diff --git a/src/db.rs b/src/db.rs
index 47c4f4c..1243f95 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -273,11 +273,19 @@ pub const TABLE_ORPHANS: TableDefinition<[u8; 32], (DbBlock, u64)> =
 pub const TABLE_ORPHAN_PARENTS: TableDefinition<[u8; 32], Vec<[u8; 32]>> =
     TableDefinition::new("orphan_parents");
 
-// Tracks blockchain forks - maps fork ID to (fork_start_height, tip_block_id, current_height)
-pub const TABLE_FORKS: TableDefinition<ForkId, (u32, BlockId, u32)> = TableDefinition::new("forks");
-
-// Maps fork tip hash to fork ID for quick lookup
-pub const TABLE_FORK_TIPS: TableDefinition<[u8; 32], ForkId> = TableDefinition::new("fork_tips");
+// Tracks blockchain forks - maps fork ID to (fork_start_height, fork_start_block_id, tip_block_id,
+// current_height)
+pub const TABLE_FORKS: TableDefinition<ForkId, (u32, BlockId, BlockId, u32)> =
+    TableDefinition::new("forks");
+
+// Maps fork tip block ID to fork ID for quick lookup
+pub const TABLE_FORK_TIPS: TableDefinition<BlockId, ForkId> = TableDefinition::new("fork_tips");
+
+// Stores complete block data for fork blocks
+// This allows us to access the full block content when performing chain reorganization
+// Fork blocks are stored with their assigned BlockId like main chain blocks
+pub const TABLE_FORK_BLOCKS: TableDefinition<BlockId, DbBlock> =
+    TableDefinition::new("fork_blocks");
 
 // Each BP-Node instance is designed to work with a single blockchain network.
// If multiple networks need to be indexed, separate instances should be used From 399e5e4bdf9c118779c3b63f4c4168a07fc19319 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Sat, 19 Apr 2025 00:37:08 +0800 Subject: [PATCH 09/21] feat: Optimize core components and fix testing issues Signed-off-by: will-bitlightlabs --- providers/bitcoincore/src/main.rs | 23 +- src/bin/bpd.rs | 348 ++++++++++++++++++++---------- src/blocks.rs | 27 ++- 3 files changed, 274 insertions(+), 124 deletions(-) diff --git a/providers/bitcoincore/src/main.rs b/providers/bitcoincore/src/main.rs index bd4943f..42d15c7 100644 --- a/providers/bitcoincore/src/main.rs +++ b/providers/bitcoincore/src/main.rs @@ -39,7 +39,11 @@ use strict_encoding::Ident; pub const AGENT: &str = "BC_BP"; -pub const BLOCK_SEPARATOR: [u8; 4] = [0xF9, 0xBE, 0xB4, 0xD9]; +pub const BITCOIN_BLOCK_SEPARATOR: [u8; 4] = [0xF9, 0xBE, 0xB4, 0xD9]; +pub const TESTNET_BLOCK_SEPARATOR: [u8; 4] = [0x0B, 0x11, 0x09, 0x07]; +pub const TESTNET4_BLOCK_SEPARATOR: [u8; 4] = [0x1c, 0x16, 0x3f, 0x28]; +pub const SIGNET_BLOCK_SEPARATOR: [u8; 4] = [0x0A, 0x03, 0xCF, 0x40]; +pub const REGTEST_BLOCK_SEPARATOR: [u8; 4] = [0xFA, 0xBF, 0xB5, 0xDA]; /// Command-line arguments #[derive(Parser)] @@ -104,6 +108,15 @@ fn read_blocks(client: Client, args: Args) { exit(1); } + // Select the correct block separator according to the network type + let block_separator = match args.network { + Network::Mainnet => BITCOIN_BLOCK_SEPARATOR, + Network::Testnet3 => TESTNET_BLOCK_SEPARATOR, + Network::Testnet4 => TESTNET4_BLOCK_SEPARATOR, + Network::Signet => SIGNET_BLOCK_SEPARATOR, + Network::Regtest => REGTEST_BLOCK_SEPARATOR, + }; + let mut file_no: u32 = 0; let mut total_blocks: u32 = 0; let mut total_tx: u64 = 0; @@ -138,7 +151,13 @@ fn read_blocks(client: Client, args: Args) { exit(4); } } - if buf != BLOCK_SEPARATOR { + + if buf == [0x00, 0x00, 0x00, 0x00] { + log::info!("Reached end of block file"); + break; + } + + if buf != block_separator { log::error!( "Invalid block separator 0x{:02X}{:02X}{:02X}{:02X} before block #{block_no}", buf[0], diff --git a/src/bin/bpd.rs b/src/bin/bpd.rs index bd7268d..d66b130 100644 --- a/src/bin/bpd.rs +++ b/src/bin/bpd.rs @@ -27,16 +27,33 @@ extern crate clap; mod opts; use std::fs; +use std::path::Path; use std::process::{ExitCode, Termination, exit}; pub use bpnode; use bpnode::{Broker, BrokerError, Config, PATH_INDEXDB}; +use bpwallet::Network; use clap::Parser; use loglevel::LogLevel; -use redb::Database; +use redb::{Database, Key, TableDefinition, Value, WriteTransaction}; use crate::opts::{Command, Opts}; +/// Exit status codes for different error conditions +const EXIT_PATH_ACCESS_ERROR: i32 = 1; +const EXIT_DB_EXISTS_ERROR: i32 = 2; +const EXIT_DIR_CREATE_ERROR: i32 = 3; +const EXIT_DB_CREATE_ERROR: i32 = 4; +const EXIT_DB_WRITE_ERROR: i32 = 5; +const EXIT_TABLE_OPEN_ERROR: i32 = 6; +const EXIT_TABLE_CREATE_ERROR: i32 = 7; +const EXIT_COMMIT_ERROR: i32 = 8; +const EXIT_TRANSACTION_ERROR: i32 = 9; +const EXIT_NETWORK_MISMATCH: i32 = 10; +const EXIT_NO_NETWORK_INFO: i32 = 11; +const EXIT_DB_NOT_FOUND: i32 = 12; + +/// Wrapper for result status to implement Termination trait struct Status(Result<(), BrokerError>); impl Termination for Status { @@ -58,131 +75,238 @@ fn main() -> Status { log::debug!("Command-line arguments: {:#?}", &opts); match opts.command { - Some(Command::Init) => { - eprint!("Initializing ... 
"); - let index_path = opts.general.data_dir.join(PATH_INDEXDB); - match fs::exists(&index_path) { - Err(err) => { - eprintln!("unable to access path '{}': {err}", index_path.display()); - exit(1); - } - Ok(true) => { - eprintln!("index database directory already exists, cancelling"); - exit(2); - } - Ok(false) => {} - } - if let Err(err) = fs::create_dir_all(&opts.general.data_dir) { + Some(Command::Init) => initialize_database(&opts), + None => run_node(opts), + } +} + +/// Initialize a new database for the BP Node +fn initialize_database(opts: &Opts) -> Status { + eprint!("Initializing ... "); + + // Prepare the database path + let index_path = opts.general.data_dir.join(PATH_INDEXDB); + + // Check if database already exists + if let Err(err) = check_db_path(&index_path, false) { + return err; + } + + // Create data directory if needed + if let Err(err) = fs::create_dir_all(&opts.general.data_dir) { + eprintln!( + "Unable to create data directory at '{}'\n{err}", + opts.general.data_dir.display() + ); + exit(EXIT_DIR_CREATE_ERROR); + } + + // Create the database + let db = match Database::create(&index_path) { + Ok(db) => db, + Err(err) => { + eprintln!("Unable to create index database.\n{err}"); + exit(EXIT_DB_CREATE_ERROR); + } + }; + + // Initialize database with network information and create all tables + let network = opts.general.network; + initialize_db_tables(&db, network); + + eprintln!("Index database initialized for {} network, exiting", network); + Status(Ok(())) +} + +/// Run the BP Node service +fn run_node(opts: Opts) -> Status { + let conf = Config::from(opts); + let index_path = conf.data_dir.join(PATH_INDEXDB); + + // Check if database exists + if let Err(err) = check_db_path(&index_path, true) { + return err; + } + + // Verify network configuration + if let Err(err) = verify_network_configuration(&index_path, &conf.network) { + return err; + } + + // Start the broker service + Status(Broker::start(conf).and_then(|runtime| runtime.run())) +} + +/// Check if database path exists or not, depending on expected state +fn check_db_path(index_path: &Path, should_exist: bool) -> Result<(), Status> { + match fs::exists(index_path) { + Err(err) => { + eprintln!("Unable to access path '{}': {err}", index_path.display()); + exit(EXIT_PATH_ACCESS_ERROR); + } + Ok(exists) => { + if exists && !should_exist { + eprintln!("Index database directory already exists, cancelling"); + exit(EXIT_DB_EXISTS_ERROR); + } else if !exists && should_exist { eprintln!( - "unable to create data directory at '{}'\n{err}", - opts.general.data_dir.display() + "ERROR: Database not found! Please initialize with 'bpd init' command first." 
); - exit(3); + exit(EXIT_DB_NOT_FOUND); } + } + } + Ok(()) +} - // Create the database - let db = match Database::create(&index_path) { - Ok(db) => db, - Err(err) => { - eprintln!("unable to create index database.\n{err}"); - exit(4); - } - }; - - // Initialize database with network information - let network = opts.general.network; - match db.begin_write() { - Ok(tx) => { - match tx.open_table(bpnode::db::TABLE_MAIN) { - Ok(mut main_table) => { - if let Err(err) = main_table - .insert(bpnode::REC_NETWORK, network.to_string().as_bytes()) - { - eprintln!("Failed to write network information to database: {err}"); - exit(5); - } - } - Err(err) => { - eprintln!("Failed to open main table in database: {err}"); - exit(6); - } - } +/// Initialize database tables +fn initialize_db_tables(db: &Database, network: Network) { + // It's necessary to open all tables with WriteTransaction to ensure they are created + // In ReDB, tables are only created when first opened with a WriteTransaction + // If later accessed with ReadTransaction without being created first, errors will occur + match db.begin_write() { + Ok(tx) => { + // Initialize main table with network information + initialize_main_table(&tx, network); - if let Err(err) = tx.commit() { - eprintln!("Failed to commit initial database transaction: {err}"); - exit(7); - } - } - Err(err) => { - eprintln!("Failed to begin database transaction: {err}"); - exit(8); - } + // Initialize all other tables by group + create_core_tables(&tx); + create_utxo_tables(&tx); + create_block_height_tables(&tx); + create_transaction_block_tables(&tx); + create_orphan_tables(&tx); + create_fork_tables(&tx); + + // Commit the transaction + if let Err(err) = tx.commit() { + eprintln!("Failed to commit initial database transaction: {err}"); + exit(EXIT_COMMIT_ERROR); } + } + Err(err) => { + eprintln!("Failed to begin database transaction: {err}"); + exit(EXIT_TRANSACTION_ERROR); + } + } +} - eprintln!("index database initialized for {} network, exiting", network); - Status(Ok(())) +/// Initialize the main table with network information +fn initialize_main_table(tx: &WriteTransaction, network: Network) { + match tx.open_table(bpnode::db::TABLE_MAIN) { + Ok(mut main_table) => { + if let Err(err) = main_table.insert(bpnode::REC_NETWORK, network.to_string().as_bytes()) + { + eprintln!("Failed to write network information to database: {err}"); + exit(EXIT_DB_WRITE_ERROR); + } + } + Err(err) => { + eprintln!("Failed to open main table in database: {err}"); + exit(EXIT_TABLE_OPEN_ERROR); } - None => { - let conf = Config::from(opts); - let index_path = conf.data_dir.join(PATH_INDEXDB); - - // Check if the database exists - if let Ok(true) = fs::exists(&index_path) { - // Open the database to check network configuration - match Database::open(&index_path) { - Ok(db) => { - // Check stored network matches configured network - if let Ok(tx) = db.begin_read() { - if let Ok(main_table) = tx.open_table(bpnode::db::TABLE_MAIN) { - if let Ok(Some(network_rec)) = main_table.get(bpnode::REC_NETWORK) { - let stored_network = - String::from_utf8_lossy(network_rec.value()); - if stored_network != conf.network.to_string() { - eprintln!("ERROR: Database network mismatch!"); - eprintln!("Configured network: {}", conf.network); - eprintln!("Database network: {}", stored_network); - eprintln!( - "Each BP-Node instance works with a single chain." - ); - eprintln!( - "To use a different network, create a separate \ - instance with a different data directory." 
- ); - exit(9); - } - log::info!( - "Database network matches configured network: {}", - stored_network - ); - } else { - // Network information not found in the database - eprintln!( - "ERROR: Database exists but doesn't contain network \ - information." - ); - eprintln!( - "Please reinitialize the database with 'bpd init' command." - ); - exit(10); - } - } + } +} + +/// Create core block and transaction tables +fn create_core_tables(tx: &WriteTransaction) { + log::info!("Creating core block and transaction tables..."); + create_table(tx, bpnode::db::TABLE_BLKS, "blocks"); + create_table(tx, bpnode::db::TABLE_TXIDS, "txids"); + create_table(tx, bpnode::db::TABLE_BLOCKIDS, "blockids"); + create_table(tx, bpnode::db::TABLE_TXES, "transactions"); +} + +/// Create UTXO and transaction relationship tables +fn create_utxo_tables(tx: &WriteTransaction) { + log::info!("Creating UTXO and transaction relationship tables..."); + create_table(tx, bpnode::db::TABLE_OUTS, "spends"); + create_table(tx, bpnode::db::TABLE_SPKS, "scripts"); + create_table(tx, bpnode::db::TABLE_UTXOS, "utxos"); +} + +/// Create block height mapping tables +fn create_block_height_tables(tx: &WriteTransaction) { + log::info!("Creating block height mapping tables..."); + create_table(tx, bpnode::db::TABLE_HEIGHTS, "block_heights"); + create_table(tx, bpnode::db::TABLE_BLOCK_HEIGHTS, "blockid_height"); +} + +/// Create transaction-block relationship tables +fn create_transaction_block_tables(tx: &WriteTransaction) { + log::info!("Creating transaction-block relationship tables..."); + create_table(tx, bpnode::db::TABLE_TX_BLOCKS, "tx_blocks"); + create_table(tx, bpnode::db::TABLE_BLOCK_TXS, "block_txs"); + create_table(tx, bpnode::db::TABLE_INPUTS, "inputs"); + create_table(tx, bpnode::db::TABLE_BLOCK_SPENDS, "block_spends"); +} + +/// Create orphan blocks tables +fn create_orphan_tables(tx: &WriteTransaction) { + log::info!("Creating orphan blocks tables..."); + create_table(tx, bpnode::db::TABLE_ORPHANS, "orphans"); + create_table(tx, bpnode::db::TABLE_ORPHAN_PARENTS, "orphan_parents"); +} + +/// Create fork management tables +fn create_fork_tables(tx: &WriteTransaction) { + log::info!("Creating fork management tables..."); + create_table(tx, bpnode::db::TABLE_FORKS, "forks"); + create_table(tx, bpnode::db::TABLE_FORK_TIPS, "fork_tips"); + create_table(tx, bpnode::db::TABLE_FORK_BLOCKS, "fork_blocks"); +} + +/// Generic function to create a table with error handling +fn create_table( + tx: &WriteTransaction, + table_def: TableDefinition, + table_name: &str, +) { + if let Err(err) = tx.open_table(table_def) { + eprintln!("Failed to create {} table: {err}", table_name); + exit(EXIT_TABLE_CREATE_ERROR); + } +} + +/// Verify that database network configuration matches the configured network +fn verify_network_configuration( + index_path: &Path, + configured_network: &Network, +) -> Result<(), Status> { + match Database::open(index_path) { + Ok(db) => { + if let Ok(tx) = db.begin_read() { + if let Ok(main_table) = tx.open_table(bpnode::db::TABLE_MAIN) { + if let Ok(Some(network_rec)) = main_table.get(bpnode::REC_NETWORK) { + let stored_network = String::from_utf8_lossy(network_rec.value()); + if stored_network != configured_network.to_string() { + eprintln!("ERROR: Database network mismatch!"); + eprintln!("Configured network: {}", configured_network); + eprintln!("Database network: {}", stored_network); + eprintln!("Each BP-Node instance works with a single chain."); + eprintln!( + "To use a different network, create a separate 
instance with a \ + different data directory." + ); + exit(EXIT_NETWORK_MISMATCH); } - } - Err(err) => { + log::info!( + "Database network matches configured network: {}", + stored_network + ); + } else { + // Network information not found in the database eprintln!( - "Warning: Could not open database to check network configuration: {}", - err + "ERROR: Database exists but doesn't contain network information." ); + eprintln!("Please reinitialize the database with 'bpd init' command."); + exit(EXIT_NO_NETWORK_INFO); } } - } else { - eprintln!( - "ERROR: Database not found! Please initialize with 'bpd init' command first." - ); - exit(11); } - - Status(Broker::start(conf).and_then(|runtime| runtime.run())) + } + Err(err) => { + eprintln!("Warning: Could not open database to check network configuration: {}", err); } } + Ok(()) } diff --git a/src/blocks.rs b/src/blocks.rs index 2086317..dc98817 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -190,11 +190,15 @@ impl BlockProcessor { let main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; - let rec = main - .get(REC_TXNO) - .map_err(BlockProcError::TxNoAbsent)? - .unwrap(); - TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)? + + // Get current transaction number or use starting value if not found + match main.get(REC_TXNO).map_err(BlockProcError::TxNoAbsent)? { + Some(rec) => TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)?, + None => { + log::debug!(target: NAME, "No transaction counter found, starting from zero"); + TxNo::start() + } + } }; let mut count = 0; @@ -1811,11 +1815,14 @@ impl BlockProcessor { let main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; - let rec = main - .get(REC_TXNO) - .map_err(BlockProcError::TxNoAbsent)? - .unwrap(); - TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)? + // Get current transaction number or use starting value if not found + match main.get(REC_TXNO).map_err(BlockProcError::TxNoAbsent)? 
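// A missing REC_TXNO record simply means no block has been processed yet, so the counter falls back to TxNo::start() instead of failing.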
{ + Some(rec) => TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)?, + None => { + log::debug!(target: NAME, "No transaction counter found, starting from zero"); + TxNo::start() + } + } }; // Iterate through blocks to apply (should be in ascending height order) From fa5c01df0e40f366718ea22f81ffa60f070bce58 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Sun, 20 Apr 2025 16:42:17 +0800 Subject: [PATCH 10/21] refactor: streamline fork information retrieval in BlockProcessor Signed-off-by: will-bitlightlabs --- client/src/main.rs | 4 +- src/blocks.rs | 151 ++++++++++++++++----------------------------- 2 files changed, 56 insertions(+), 99 deletions(-) diff --git a/client/src/main.rs b/client/src/main.rs index d1722e2..380a4ff 100644 --- a/client/src/main.rs +++ b/client/src/main.rs @@ -56,7 +56,9 @@ fn cb(reply: Response) { Response::Failure(failure) => { println!("Failure: {failure}"); } - Response::Pong(_noise) => {} + Response::Pong(_noise) => { + println!("Pong from server"); + } Response::Status(status) => { println!("{}", serde_yaml::to_string(&status).unwrap()); } diff --git a/src/blocks.rs b/src/blocks.rs index dc98817..e722256 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -1157,24 +1157,8 @@ impl BlockProcessor { fork_id: ForkId, ) -> Result<(), BlockProcError> { // Get fork information - let forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - - let fork_info = match forks_table - .get(fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? - { - Some(record) => record.value(), - None => { - return Err(BlockProcError::Custom(format!( - "Fork {} not found in database", - fork_id - ))); - } - }; - - let (_fork_start_height, _fork_start_block_id, _fork_tip_id, fork_height) = fork_info; + let (_fork_start_height, _fork_start_block_id, _fork_tip_id, fork_height) = + self.get_fork_info(db, fork_id)?; // Get main chain height let main_chain_height = self.get_main_chain_height(db)?; @@ -1211,24 +1195,8 @@ impl BlockProcessor { fork_id: ForkId, ) -> Result<(), BlockProcError> { // Get fork information - let forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - - let fork_info = match forks_table - .get(fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? - { - Some(record) => record.value(), - None => { - return Err(BlockProcError::Custom(format!( - "Fork {} not found in database", - fork_id - ))); - } - }; - - let (fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = fork_info; + let (fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = + self.get_fork_info(db, fork_id)?; log::info!( target: NAME, @@ -1417,28 +1385,16 @@ impl BlockProcessor { fork_id: ForkId, new_block_id: BlockId, ) -> Result<(), BlockProcError> { + // Get current fork info + let (start_height, start_block_id, old_tip_id, current_height) = + self.get_fork_info(db, fork_id)?; + let new_height = current_height + 1; + // Update the fork record let mut forks_table = db .open_table(TABLE_FORKS) .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - // Get current fork info - let fork_info = match forks_table - .get(fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? 
- { - Some(record) => record.value(), - None => { - return Err(BlockProcError::Custom(format!( - "Fork {} not found in database", - fork_id - ))); - } - }; - - let (start_height, start_block_id, old_tip_id, current_height) = fork_info; - let new_height = current_height + 1; - // Update fork with new tip and height forks_table .insert(fork_id, (start_height, start_block_id, new_block_id, new_height)) @@ -1578,24 +1534,8 @@ impl BlockProcessor { // This is more complex as fork blocks aren't in the heights table yet // Get the tip block ID of the fork - let forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - - let fork_info = match forks_table - .get(fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? - { - Some(record) => record.value(), - None => { - return Err(BlockProcError::Custom(format!( - "Fork {} not found in database", - fork_id - ))); - } - }; - - let (_fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = fork_info; + let (_fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = + self.get_fork_info(db, fork_id)?; // We need to find all blocks from the tip down to the start height // Since they're not yet in the heights table, we need to traverse backwards @@ -1867,7 +1807,7 @@ impl BlockProcessor { // For fork blocks, txids may already be in the database with assigned txno // Check if this txid already exists - let txids_table = db + let mut txids_table = db .open_table(TABLE_TXIDS) .map_err(BlockProcError::TxidTable)?; @@ -1890,10 +1830,6 @@ impl BlockProcessor { // If this is a new transaction, store its mapping and data if existing_txno.is_none() { - // Store transaction ID to transaction number mapping - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; txids_table .insert(txid.to_byte_array(), tx_txno) .map_err(BlockProcError::TxidStorage)?; @@ -2079,32 +2015,29 @@ impl BlockProcessor { applied_fork_id: ForkId, ) -> Result<(), BlockProcError> { // Get information about the applied fork - let forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - - let fork_info = match forks_table - .get(applied_fork_id) - .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? 
- { - Some(record) => record.value(), - None => { - // Fork already removed, nothing to do - return Ok(()); - } - }; - - let (_start_height, _start_block_id, _tip_id, fork_height) = fork_info; + let (_start_height, _start_block_id, _tip_id, fork_height) = + match self.get_fork_info(db, applied_fork_id) { + Ok(info) => info, + Err(BlockProcError::Custom(msg)) if msg.contains("not found") => { + // Fork already removed, nothing to do + return Ok(()); + } + Err(e) => return Err(e), + }; // Remove old forks that are now definitely invalid // Any fork that starts at a height less than the applied fork's height // and has not become the main chain by now should be removed + let mut forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let mut forks_to_remove = Vec::new(); + let iter = forks_table .iter() .map_err(|e| BlockProcError::Custom(format!("Forks iterator error: {}", e)))?; - let mut forks_to_remove = Vec::new(); - for entry in iter { let (fork_id, info) = entry.map_err(|e| BlockProcError::Custom(format!("Fork entry error: {}", e)))?; @@ -2125,10 +2058,6 @@ impl BlockProcessor { } // Now remove the outdated forks - let mut forks_table = db - .open_table(TABLE_FORKS) - .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; - let mut fork_tips_table = db .open_table(TABLE_FORK_TIPS) .map_err(|e| BlockProcError::Custom(format!("Fork tips table error: {}", e)))?; @@ -2179,6 +2108,32 @@ impl BlockProcessor { Ok(()) } + + /// Helper method to get fork information, reducing the need to repeatedly open the forks table + fn get_fork_info( + &self, + db: &WriteTransaction, + fork_id: ForkId, + ) -> Result<(u32, BlockId, BlockId, u32), BlockProcError> { + let forks_table = db + .open_table(TABLE_FORKS) + .map_err(|e| BlockProcError::Custom(format!("Forks table error: {}", e)))?; + + let fork_info = match forks_table + .get(fork_id) + .map_err(|e| BlockProcError::Custom(format!("Fork lookup error: {}", e)))? 
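// A missing fork is surfaced as a "not found" error so that callers such as cleanup_after_reorg can treat an already-removed fork as a no-op.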
+ { + Some(record) => record.value(), + None => { + return Err(BlockProcError::Custom(format!( + "Fork {} not found in database", + fork_id + ))); + } + }; + + Ok(fork_info) + } } #[derive(Debug, Display, Error, From)] From d10820c47214fed4a059ae3dc3e1dc41f95a90fb Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Tue, 22 Apr 2025 22:38:57 +0800 Subject: [PATCH 11/21] fix: resolve reorg, orphan block processing and database initialization issues Signed-off-by: will-bitlightlabs --- src/bin/bpd.rs | 117 +-------------------------------- src/blocks.rs | 171 ++++++++++++++++++++++++++++++++++++++---------- src/broker.rs | 7 +- src/db.rs | 118 ++++++++++++++++++++++++++++++++- src/importer.rs | 2 +- src/lib.rs | 2 +- 6 files changed, 258 insertions(+), 159 deletions(-) diff --git a/src/bin/bpd.rs b/src/bin/bpd.rs index d66b130..7fcaded 100644 --- a/src/bin/bpd.rs +++ b/src/bin/bpd.rs @@ -31,11 +31,11 @@ use std::path::Path; use std::process::{ExitCode, Termination, exit}; pub use bpnode; -use bpnode::{Broker, BrokerError, Config, PATH_INDEXDB}; +use bpnode::{Broker, BrokerError, Config, PATH_INDEXDB, initialize_db_tables}; use bpwallet::Network; use clap::Parser; use loglevel::LogLevel; -use redb::{Database, Key, TableDefinition, Value, WriteTransaction}; +use redb::Database; use crate::opts::{Command, Opts}; @@ -44,11 +44,6 @@ const EXIT_PATH_ACCESS_ERROR: i32 = 1; const EXIT_DB_EXISTS_ERROR: i32 = 2; const EXIT_DIR_CREATE_ERROR: i32 = 3; const EXIT_DB_CREATE_ERROR: i32 = 4; -const EXIT_DB_WRITE_ERROR: i32 = 5; -const EXIT_TABLE_OPEN_ERROR: i32 = 6; -const EXIT_TABLE_CREATE_ERROR: i32 = 7; -const EXIT_COMMIT_ERROR: i32 = 8; -const EXIT_TRANSACTION_ERROR: i32 = 9; const EXIT_NETWORK_MISMATCH: i32 = 10; const EXIT_NO_NETWORK_INFO: i32 = 11; const EXIT_DB_NOT_FOUND: i32 = 12; @@ -159,114 +154,6 @@ fn check_db_path(index_path: &Path, should_exist: bool) -> Result<(), Status> { Ok(()) } -/// Initialize database tables -fn initialize_db_tables(db: &Database, network: Network) { - // It's necessary to open all tables with WriteTransaction to ensure they are created - // In ReDB, tables are only created when first opened with a WriteTransaction - // If later accessed with ReadTransaction without being created first, errors will occur - match db.begin_write() { - Ok(tx) => { - // Initialize main table with network information - initialize_main_table(&tx, network); - - // Initialize all other tables by group - create_core_tables(&tx); - create_utxo_tables(&tx); - create_block_height_tables(&tx); - create_transaction_block_tables(&tx); - create_orphan_tables(&tx); - create_fork_tables(&tx); - - // Commit the transaction - if let Err(err) = tx.commit() { - eprintln!("Failed to commit initial database transaction: {err}"); - exit(EXIT_COMMIT_ERROR); - } - } - Err(err) => { - eprintln!("Failed to begin database transaction: {err}"); - exit(EXIT_TRANSACTION_ERROR); - } - } -} - -/// Initialize the main table with network information -fn initialize_main_table(tx: &WriteTransaction, network: Network) { - match tx.open_table(bpnode::db::TABLE_MAIN) { - Ok(mut main_table) => { - if let Err(err) = main_table.insert(bpnode::REC_NETWORK, network.to_string().as_bytes()) - { - eprintln!("Failed to write network information to database: {err}"); - exit(EXIT_DB_WRITE_ERROR); - } - } - Err(err) => { - eprintln!("Failed to open main table in database: {err}"); - exit(EXIT_TABLE_OPEN_ERROR); - } - } -} - -/// Create core block and transaction tables -fn create_core_tables(tx: &WriteTransaction) { - 
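// (These table-creation helpers now live in src/db.rs as part of initialize_db_tables, which bpd.rs imports from bpnode.)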
log::info!("Creating core block and transaction tables..."); - create_table(tx, bpnode::db::TABLE_BLKS, "blocks"); - create_table(tx, bpnode::db::TABLE_TXIDS, "txids"); - create_table(tx, bpnode::db::TABLE_BLOCKIDS, "blockids"); - create_table(tx, bpnode::db::TABLE_TXES, "transactions"); -} - -/// Create UTXO and transaction relationship tables -fn create_utxo_tables(tx: &WriteTransaction) { - log::info!("Creating UTXO and transaction relationship tables..."); - create_table(tx, bpnode::db::TABLE_OUTS, "spends"); - create_table(tx, bpnode::db::TABLE_SPKS, "scripts"); - create_table(tx, bpnode::db::TABLE_UTXOS, "utxos"); -} - -/// Create block height mapping tables -fn create_block_height_tables(tx: &WriteTransaction) { - log::info!("Creating block height mapping tables..."); - create_table(tx, bpnode::db::TABLE_HEIGHTS, "block_heights"); - create_table(tx, bpnode::db::TABLE_BLOCK_HEIGHTS, "blockid_height"); -} - -/// Create transaction-block relationship tables -fn create_transaction_block_tables(tx: &WriteTransaction) { - log::info!("Creating transaction-block relationship tables..."); - create_table(tx, bpnode::db::TABLE_TX_BLOCKS, "tx_blocks"); - create_table(tx, bpnode::db::TABLE_BLOCK_TXS, "block_txs"); - create_table(tx, bpnode::db::TABLE_INPUTS, "inputs"); - create_table(tx, bpnode::db::TABLE_BLOCK_SPENDS, "block_spends"); -} - -/// Create orphan blocks tables -fn create_orphan_tables(tx: &WriteTransaction) { - log::info!("Creating orphan blocks tables..."); - create_table(tx, bpnode::db::TABLE_ORPHANS, "orphans"); - create_table(tx, bpnode::db::TABLE_ORPHAN_PARENTS, "orphan_parents"); -} - -/// Create fork management tables -fn create_fork_tables(tx: &WriteTransaction) { - log::info!("Creating fork management tables..."); - create_table(tx, bpnode::db::TABLE_FORKS, "forks"); - create_table(tx, bpnode::db::TABLE_FORK_TIPS, "fork_tips"); - create_table(tx, bpnode::db::TABLE_FORK_BLOCKS, "fork_blocks"); -} - -/// Generic function to create a table with error handling -fn create_table( - tx: &WriteTransaction, - table_def: TableDefinition, - table_name: &str, -) { - if let Err(err) = tx.open_table(table_def) { - eprintln!("Failed to create {} table: {err}", table_name); - exit(EXIT_TABLE_CREATE_ERROR); - } -} - /// Verify that database network configuration matches the configured network fn verify_network_configuration( index_path: &Path, diff --git a/src/blocks.rs b/src/blocks.rs index e722256..ceb10bd 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -174,6 +174,24 @@ impl BlockProcessor { Ok(height) } + /// Check if the block hash already exists in the database + fn is_block_exists( + &self, + db: &WriteTransaction, + block_hash: &BlockHash, + ) -> Result { + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let exists = blockids_table + .get(block_hash.to_byte_array()) + .map_err(|e| BlockProcError::Custom(format!("Block hash lookup error: {}", e)))? + .is_some(); + + Ok(exists) + } + pub fn process_block(&mut self, id: BlockHash, block: Block) -> Result { // Store a copy of the parent hash for potential orphan block handling let parent_hash = block.header.prev_block_hash; @@ -185,8 +203,18 @@ impl BlockProcessor { self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; + // Check if the block already exists + if self.is_block_exists(&db, &id)? 
{ + log::warn!( + target: NAME, + "Block {} already exists in database, skipping processing", + id + ); + return Err(BlockProcError::Custom(format!("Block {} already exists", id))); + } + // Get current transaction number - let mut txno = { + let mut txno_counter = { let main = db .open_table(TABLE_MAIN) .map_err(BlockProcError::MainTable)?; @@ -256,16 +284,29 @@ impl BlockProcessor { // Process transactions in the block for tx in block.transactions { + // Store transaction ID to transaction number mapping + let mut txids_table = db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?; + + // Get txno from TABLE_TXIDS using txid. If it doesn't exist, use txno-counter, + // otherwise use the existing txno. This is mainly to avoid issues after block + // reorganization, where the same txid in different blocks could be + // assigned different txno values, leading to incorrect processing + let txid = tx.txid(); - txno.inc_assign(); + let txno = txids_table + .get(txid.to_byte_array()) + .map_err(BlockProcError::TxidLookup)? + .map(|v| v.value()) + .unwrap_or_else(|| { + txno_counter.inc_assign(); + txno_counter + }); // Add transaction to the list for this block block_txs.push(txno); - // Store transaction ID to transaction number mapping - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; txids_table .insert(txid.to_byte_array(), txno) .map_err(BlockProcError::TxidStorage)?; @@ -412,7 +453,7 @@ impl BlockProcessor { .map_err(BlockProcError::MainTable)?; // Update transaction counter - main.insert(REC_TXNO, txno.to_byte_array().as_slice()) + main.insert(REC_TXNO, txno_counter.to_byte_array().as_slice()) .map_err(BlockProcError::TxNoUpdate)?; // Log successful block processing @@ -440,7 +481,7 @@ impl BlockProcessor { Ok(count) } - Err(BlockProcError::OrphanBlock(_)) => { + Err(BlockProcError::OrphanBlock(e)) => { // Handle orphan block case if let Err(err) = db.abort() { log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); @@ -454,7 +495,8 @@ impl BlockProcessor { id ); - return self.save_orphan_block(id, block_clone); + self.save_orphan_block(id, block_clone)?; + return Err(BlockProcError::OrphanBlock(e)); } Err(BlockProcError::PotentialFork(new_block_hash, height, existing_blockid)) => { // Handle potential fork case - conflict with existing block at same height @@ -465,13 +507,15 @@ impl BlockProcessor { // Record this as a potential fork for later verification // Store the new block but don't update the height tables yet // We'll only perform a reorganization if this fork becomes the longest chain - self.process_potential_fork( + let result = self.process_potential_fork( id, &block_clone, Some(height), Some(existing_blockid), )?; + debug_assert!(result.is_none()); + return Err(BlockProcError::PotentialFork( new_block_hash, height, @@ -491,9 +535,14 @@ impl BlockProcessor { parent_hash ); - self.process_potential_fork(id, &block_clone, None, None)?; + // If a chain reorganization occurs, return the number of transactions added + if let Some(txs_added) = + self.process_potential_fork(id, &block_clone, None, None)? + { + return Ok(txs_added); + } - return Ok(0); + return Err(BlockProcError::ForkChainExtension(block_hash, parent_hash)); } Err(e) => { // Handle other errors @@ -510,13 +559,6 @@ impl BlockProcessor { /// /// This method should be used instead of directly calling `process_block` when you want to /// ensure that orphan blocks dependent on the processed block are also handled. 
- /// - /// # Example - /// ```no_run - /// let processor = BlockProcessor::new(db, broker); - /// // Process a block and its dependent orphans - /// processor.process_block_and_orphans(block_hash, block)?; - /// ``` pub fn process_block_and_orphans( &mut self, id: BlockHash, @@ -1059,7 +1101,7 @@ impl BlockProcessor { block: &Block, height: Option<u32>, existing_blockid: Option<BlockId>, - ) -> Result<(), BlockProcError> { + ) -> Result<Option<usize>, BlockProcError> { let (tx, rx) = crossbeam_channel::bounded(1); self.db.send(DbMsg::Write(tx))?; let db = rx.recv()?; @@ -1127,7 +1169,7 @@ impl BlockProcessor { block_hash, block.header.prev_block_hash ); - return Ok(()); + return Ok(None); } self.record_fork( @@ -1143,11 +1185,11 @@ impl BlockProcessor { }; // Check if this fork is now longer than the main chain - self.check_fork_length(&db, fork_id)?; + let txs_added = self.check_fork_length(&db, fork_id)?; db.commit()?; - Ok(()) + Ok(txs_added) } /// Check if a fork is longer than the main chain and perform reorganization if needed @@ -1155,7 +1197,7 @@ impl BlockProcessor { &mut self, db: &WriteTransaction, fork_id: ForkId, - ) -> Result<(), BlockProcError> { + ) -> Result<Option<usize>, BlockProcError> { // Get fork information let (_fork_start_height, _fork_start_block_id, _fork_tip_id, fork_height) = self.get_fork_info(db, fork_id)?; @@ -1174,7 +1216,8 @@ impl BlockProcessor { ); // Perform chain reorganization - self.perform_chain_reorganization(db, fork_id)?; + let txs_added = self.perform_chain_reorganization(db, fork_id)?; + return Ok(Some(txs_added)); } else { log::debug!( target: NAME, @@ -1185,7 +1228,7 @@ impl BlockProcessor { ); } - Ok(()) + Ok(None) } /// Perform a chain reorganization to adopt a fork as the new main chain @@ -1193,7 +1236,7 @@ impl BlockProcessor { &mut self, db: &WriteTransaction, fork_id: ForkId, - ) -> Result<(), BlockProcError> { + ) -> Result<usize, BlockProcError> { // Get fork information let (fork_start_height, _fork_start_block_id, fork_tip_id, fork_height) = self.get_fork_info(db, fork_id)?; @@ -1230,7 +1273,7 @@ impl BlockProcessor { self.rollback_blocks(db, &blocks_to_rollback)?; // 5. Apply blocks from fork chain - self.apply_blocks(db, &blocks_to_apply)?; + let txs_added = self.apply_blocks(db, &blocks_to_apply)?; // 6. Update fork status self.cleanup_after_reorg(db, fork_id)?; @@ -1241,7 +1284,7 @@ impl BlockProcessor { fork_height ); - Ok(()) + Ok(txs_added) } /// Records a potential fork in the blockchain. @@ -1620,6 +1663,10 @@ impl BlockProcessor { return Ok(()); } + let mut total_txs_removed = 0; + let mut total_utxos_restored = 0; + let mut total_utxos_removed = 0; + // Iterate through blocks to roll back (should be in descending height order) for &(height, block_id) in blocks { log::info!( @@ -1629,6 +1676,10 @@ block_id ); + let mut block_utxos_restored = 0; + let mut block_utxos_removed = 0; + let mut block_txs_removed = 0; + // 1. Restore UTXOs spent in this block let block_spends_table = db .open_table(TABLE_BLOCK_SPENDS) @@ -1639,6 +1690,8 @@ .map_err(|e| BlockProcError::Custom(format!("Block spends lookup error: {}", e)))? { let spends = spends_record.value(); + block_utxos_restored = spends.len(); + total_utxos_restored += block_utxos_restored; // Restore each spent UTXO let mut utxos_table = db @@ -1669,6 +1722,8 @@ .map_err(|e| BlockProcError::Custom(format!("Block-txs lookup error: {}", e)))?
{ let txs = txs_record.value(); + block_txs_removed = txs.len(); + total_txs_removed += block_txs_removed; // For each transaction for txno in txs { // 3. Remove UTXOs created by this transaction @@ -1683,6 +1738,8 @@ { let tx = tx_record.value(); let num_outputs = tx.as_ref().outputs.len(); + block_utxos_removed += num_outputs; + total_utxos_removed += num_outputs; let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { BlockProcError::Custom(format!("UTXOs table error: {}", e)) @@ -1727,12 +1784,24 @@ height, block_id ); + + log::info!( + target: NAME, + "Block rollback stats for height {}: removed {} transactions, restored {} UTXOs, removed {} UTXOs", + height, + block_txs_removed, + block_utxos_restored, + block_utxos_removed + ); } log::info!( target: NAME, - "Successfully rolled back {} blocks", - blocks.len() + "Successfully rolled back {} blocks: removed {} transactions, restored {} UTXOs, removed {} UTXOs", + blocks.len(), + total_txs_removed, + total_utxos_restored, + total_utxos_removed ); Ok(()) @@ -1745,9 +1814,9 @@ impl BlockProcessor { &self, db: &WriteTransaction, blocks: &[(u32, BlockId)], - ) -> Result<(), BlockProcError> { + ) -> Result<usize, BlockProcError> { if blocks.is_empty() { - return Ok(()); + return Ok(0); } // Get current transaction number - we'll need this for processing new transactions @@ -1765,6 +1834,10 @@ } }; + let mut total_txs_added = 0; + let mut total_utxos_added = 0; + let mut total_utxos_spent = 0; + // Iterate through blocks to apply (should be in ascending height order) for &(height, block_id) in blocks { log::info!( @@ -1795,6 +1868,10 @@ block_id ); + let mut block_txs_added: usize = 0; + let mut block_utxos_added: usize = 0; + let mut block_utxos_spent: usize = 0; + // Track UTXOs spent in this block let mut block_spends = Vec::new(); @@ -1841,6 +1918,8 @@ txes_table .insert(tx_txno, DbTx::from(tx.clone())) .map_err(BlockProcError::TxesStorage)?; + + block_txs_added += 1; } // Associate transaction with block ID (update even if transaction existed) @@ -1873,6 +1952,8 @@ BlockProcError::Custom(format!("UTXOs removal error: {}", e)) })?; + block_utxos_spent += 1; + // Record UTXO spent in this block block_spends.push((prev_txno, prev_vout.into_u32())); @@ -1924,6 +2005,8 @@ BlockProcError::Custom(format!("UTXOs storage error: {}", e)) })?; + block_utxos_added += 1; + // Index script pubkey let script = &output.script_pubkey; if !script.is_empty() { @@ -1990,6 +2073,19 @@ height, block_id ); + + total_txs_added += block_txs_added; + total_utxos_added += block_utxos_added; + total_utxos_spent += block_utxos_spent; + + log::info!( + target: NAME, + "Block apply stats for height {}: added {} transactions, added {} UTXOs, spent {} UTXOs", + height, + block_txs_added, + block_utxos_added, + block_utxos_spent + ); } // Update the global transaction counter @@ -2001,11 +2097,14 @@ log::info!( target: NAME, - "Successfully applied {} blocks with all transactions", - blocks.len() + "Successfully applied {} blocks: added {} transactions, added {} UTXOs, spent {} UTXOs", + blocks.len(), + total_txs_added, + total_utxos_added, + total_utxos_spent ); - Ok(()) + Ok(total_txs_added) } /// Clean up fork information after a successful reorganization diff --git a/src/broker.rs b/src/broker.rs index 594d299..7d614ae 100644 --- a/src/broker.rs +++ b/src/broker.rs @@ -161,9 +161,10 @@ impl Broker { match msg {
ImporterMsg::Mined(txid) => { for (remote, filters) in &self.tracking { - // TODO: Check against Bloom filters - if false { - self.rpc.cmd(RpcCmd::Send(*remote, Response::Mined(txid)))?; + for filter in filters { + if filter.contains(txid) { + self.rpc.cmd(RpcCmd::Send(*remote, Response::Mined(txid)))?; + } } } } diff --git a/src/db.rs b/src/db.rs index 1243f95..0a2b66c 100644 --- a/src/db.rs +++ b/src/db.rs @@ -24,15 +24,16 @@ use std::cmp::Ordering; use std::ops::ControlFlow; use std::path::Path; +use std::process::exit; use amplify::num::u40; use amplify::{ByteArray, FromSliceError}; -use bpwallet::{Block, BlockHeader, ConsensusDecode, ConsensusEncode, Tx}; +use bpwallet::{Block, BlockHeader, ConsensusDecode, ConsensusEncode, Network, Tx}; use crossbeam_channel::{SendError, Sender}; use microservices::UService; use redb::{ - Database, DatabaseError, ReadTransaction, TableDefinition, TransactionError, TypeName, - WriteTransaction, + Database, DatabaseError, Key, ReadTransaction, TableDefinition, TransactionError, TypeName, + Value, WriteTransaction, }; #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Display)] @@ -50,6 +51,8 @@ impl TxNo { pub fn start() -> Self { TxNo(u40::ZERO) } pub fn inc_assign(&mut self) { self.0 += u40::ONE } + + pub fn into_inner(self) -> u40 { self.0 } } impl Id { @@ -70,6 +73,8 @@ impl Id { array.copy_from_slice(bytes); Id(u32::from_be_bytes(array)) } + + pub fn into_inner(self) -> u32 { self.0 } } impl ByteArray<5> for TxNo { @@ -343,3 +348,110 @@ impl UService for IndexDb { } } } + +/// Initialize database tables +pub fn initialize_db_tables(db: &Database, network: Network) { + // It's necessary to open all tables with WriteTransaction to ensure they are created + // In ReDB, tables are only created when first opened with a WriteTransaction + // If later accessed with ReadTransaction without being created first, errors will occur + match db.begin_write() { + Ok(tx) => { + // Initialize main table with network information + initialize_main_table(&tx, network); + + // Initialize all other tables by group + create_core_tables(&tx); + create_utxo_tables(&tx); + create_block_height_tables(&tx); + create_transaction_block_tables(&tx); + create_orphan_tables(&tx); + create_fork_tables(&tx); + + // Commit the transaction + if let Err(err) = tx.commit() { + eprintln!("Failed to commit initial database transaction: {err}"); + exit(8); + } + } + Err(err) => { + eprintln!("Failed to begin database transaction: {err}"); + exit(9); + } + } +} + +/// Initialize the main table with network information +fn initialize_main_table(tx: &WriteTransaction, network: Network) { + match tx.open_table(TABLE_MAIN) { + Ok(mut main_table) => { + if let Err(err) = main_table.insert(REC_NETWORK, network.to_string().as_bytes()) { + eprintln!("Failed to write network information to database: {err}"); + exit(5); + } + } + Err(err) => { + eprintln!("Failed to open main table in database: {err}"); + exit(6); + } + } +} + +/// Create core block and transaction tables +fn create_core_tables(tx: &WriteTransaction) { + log::info!("Creating core block and transaction tables..."); + create_table(tx, TABLE_BLKS, "blocks"); + create_table(tx, TABLE_TXIDS, "txids"); + create_table(tx, TABLE_BLOCKIDS, "blockids"); + create_table(tx, TABLE_TXES, "transactions"); +} + +/// Create UTXO and transaction relationship tables +fn create_utxo_tables(tx: &WriteTransaction) { + log::info!("Creating UTXO and transaction relationship tables..."); + create_table(tx, TABLE_OUTS, "spends"); + 
create_table(tx, TABLE_SPKS, "scripts"); + create_table(tx, TABLE_UTXOS, "utxos"); +} + +/// Create block height mapping tables +fn create_block_height_tables(tx: &WriteTransaction) { + log::info!("Creating block height mapping tables..."); + create_table(tx, TABLE_HEIGHTS, "block_heights"); + create_table(tx, TABLE_BLOCK_HEIGHTS, "blockid_height"); +} + +/// Create transaction-block relationship tables +fn create_transaction_block_tables(tx: &WriteTransaction) { + log::info!("Creating transaction-block relationship tables..."); + create_table(tx, TABLE_TX_BLOCKS, "tx_blocks"); + create_table(tx, TABLE_BLOCK_TXS, "block_txs"); + create_table(tx, TABLE_INPUTS, "inputs"); + create_table(tx, TABLE_BLOCK_SPENDS, "block_spends"); +} + +/// Create orphan blocks tables +fn create_orphan_tables(tx: &WriteTransaction) { + log::info!("Creating orphan blocks tables..."); + create_table(tx, TABLE_ORPHANS, "orphans"); + create_table(tx, TABLE_ORPHAN_PARENTS, "orphan_parents"); +} + +/// Create fork management tables +fn create_fork_tables(tx: &WriteTransaction) { + log::info!("Creating fork management tables..."); + create_table(tx, TABLE_FORKS, "forks"); + create_table(tx, TABLE_FORK_TIPS, "fork_tips"); + create_table(tx, TABLE_FORK_BLOCKS, "fork_blocks"); +} + +/// Generic function to create a table with error handling +fn create_table<K: Key + 'static, V: Value + 'static>( + tx: &WriteTransaction, + table_def: TableDefinition<K, V>, + table_name: &str, +) { + if let Err(err) = tx.open_table(table_def) { + eprintln!("Failed to create {} table: {err}", table_name); + exit(7); + } +} diff --git a/src/importer.rs b/src/importer.rs index 8416c78..dc1b2eb 100644 --- a/src/importer.rs +++ b/src/importer.rs @@ -155,7 +155,7 @@ impl ServiceController for BlockImporter ExporterPub::Block(block) => { let block_id = block.header.block_hash(); log::debug!("Received block {block_id} from {remote}"); - match self.processor.process_block(block_id, block) { + match self.processor.process_block_and_orphans(block_id, block) { Err(err) => { log::error!(target: NAME, "{err}"); log::warn!(target: NAME, "Block {block_id} got dropped due to database connectivity issue"); diff --git a/src/lib.rs b/src/lib.rs index 9af7b63..2d6d36e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -41,7 +41,7 @@ mod importer; pub use blocks::{BlockProcError, BlockProcessor}; pub use broker::{Broker, BrokerError, BrokerRpcMsg, PATH_INDEXDB, TrackReq}; pub use config::Config; -pub use db::REC_NETWORK; +pub use db::{REC_NETWORK, initialize_db_tables}; pub use importer::{BlockImporter, ImporterCmd, ImporterMsg}; pub use rpc::{RpcCmd, RpcController}; //pub use query::{QueryWorker, QueryReq, QueryResp}; From 6023042f93ff18c7cb68e241f61deac3c9c0086a Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:47:53 +0200 Subject: [PATCH 12/21] ci: fix macos build --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 44a3e7a..59f5bcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1655,14 +1655,14 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f05bccc8b6036fec4e0c511954e3997987a82acb6a0b50642ecf7c744fe225" dependencies = [ - "parse_arg 1.0.0", + "parse_arg 1.0.1", ] [[package]] name = "parse_arg" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aa7e02eed7573816a0edb0d7f7aed7cdd59a22d101c3e9dc5e5ea3b935d3346" +checksum = "5bddc33f680b79eaf1e2e56da792c3c2236f86985bbc3a886e8ddee17ae4d3a4" [[package]] name = "paste"
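One behavior from the database-initialization patch above is worth making concrete: in redb a table only comes into existence the first time it is opened inside a write transaction, so a freshly created database keeps returning table-not-found errors to read transactions until `initialize_db_tables` has run and committed. A minimal standalone sketch of that behavior, assuming redb's standard API (the `DEMO` table definition and the file name are illustrative, not part of BP-Node):

    use redb::{Database, TableDefinition};

    const DEMO: TableDefinition<u32, u64> = TableDefinition::new("demo");

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let db = Database::create("demo.redb")?;
        // No write transaction has opened DEMO yet: readers cannot open it.
        assert!(db.begin_read()?.open_table(DEMO).is_err());
        // Opening the table inside a write transaction creates it on commit.
        let wtx = db.begin_write()?;
        wtx.open_table(DEMO)?;
        wtx.commit()?;
        // From now on read transactions can open the table.
        let rtx = db.begin_read()?;
        let _table = rtx.open_table(DEMO)?;
        Ok(())
    }

This is exactly why `initialize_db_tables` insists on opening every table through a `WriteTransaction` once at startup, before any read-only consumer touches the index.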
From b7916f82adc07d45ab908cbfb3b94002972d8b91 Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:48:05 +0200 Subject: [PATCH 13/21] bpd: streamline verify_network_configuration code --- src/bin/bpd.rs | 80 +++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/src/bin/bpd.rs b/src/bin/bpd.rs index 7fcaded..4503bbc 100644 --- a/src/bin/bpd.rs +++ b/src/bin/bpd.rs @@ -124,9 +124,7 @@ fn run_node(opts: Opts) -> Status { } // Verify network configuration - if let Err(err) = verify_network_configuration(&index_path, &conf.network) { - return err; - } + verify_network_configuration(&index_path, &conf.network); // Start the broker service Status(Broker::start(conf).and_then(|runtime| runtime.run())) @@ -155,45 +153,41 @@ fn check_db_path(index_path: &Path, should_exist: bool) -> Result<(), Status> { } /// Verify that database network configuration matches the configured network -fn verify_network_configuration( - index_path: &Path, - configured_network: &Network, -) -> Result<(), Status> { - match Database::open(index_path) { - Ok(db) => { - if let Ok(tx) = db.begin_read() { - if let Ok(main_table) = tx.open_table(bpnode::db::TABLE_MAIN) { - if let Ok(Some(network_rec)) = main_table.get(bpnode::REC_NETWORK) { - let stored_network = String::from_utf8_lossy(network_rec.value()); - if stored_network != configured_network.to_string() { - eprintln!("ERROR: Database network mismatch!"); - eprintln!("Configured network: {}", configured_network); - eprintln!("Database network: {}", stored_network); - eprintln!("Each BP-Node instance works with a single chain."); - eprintln!( - "To use a different network, create a separate instance with a \ - different data directory." - ); - exit(EXIT_NETWORK_MISMATCH); - } - log::info!( - "Database network matches configured network: {}", - stored_network - ); - } else { - // Network information not found in the database - eprintln!( - "ERROR: Database exists but doesn't contain network information." 
- ); - eprintln!("Please reinitialize the database with 'bpd init' command."); - exit(EXIT_NO_NETWORK_INFO); - } - } - } - } - Err(err) => { - eprintln!("Warning: Could not open database to check network configuration: {}", err); - } +fn verify_network_configuration(index_path: &Path, configured_network: &Network) { + let Ok(db) = Database::open(index_path) + .inspect_err(|err| eprintln!("Error: could not open the database due to {err}")) + else { + exit(EXIT_DB_OPEN_ERROR) + }; + let Ok(tx) = db + .begin_read() + .inspect_err(|err| eprintln!("Error: could not access the database due to {err}")) + else { + exit(EXIT_DB_OPEN_ERROR) + }; + let Ok(main_table) = tx + .open_table(bpnode::db::TABLE_MAIN) + .inspect_err(|err| eprintln!("Error: could not open the main table due to {err}")) + else { + exit(EXIT_DB_OPEN_ERROR) + }; + let Ok(Some(network_rec)) = main_table.get(bpnode::REC_NETWORK) else { + // Network information isn't found in the database + eprintln!("ERROR: Database exists but doesn't contain network information."); + eprintln!("Please reinitialize the database with `bpd init` command."); + exit(EXIT_NO_NETWORK_INFO); + }; + let stored_network = String::from_utf8_lossy(network_rec.value()); + if stored_network != configured_network.to_string() { + eprintln!("ERROR: Database network mismatch!"); + eprintln!("Configured network: {}", configured_network); + eprintln!("Database network: {}", stored_network); + eprintln!("Each BP-Node instance works with a single chain."); + eprintln!( + "To use a different network, create a separate instance with a different data \ + directory." + ); + exit(EXIT_NETWORK_MISMATCH); } - Ok(()) + log::info!("Database network matches configured network: {}", stored_network); } From 99d365cdecda606c8f9dda91b073b6c70361f3f1 Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:48:24 +0200 Subject: [PATCH 14/21] db: systematize exit constants --- src/bin/bpd.rs | 4 +++- src/db.rs | 15 ++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/bin/bpd.rs b/src/bin/bpd.rs index 4503bbc..0a6d03b 100644 --- a/src/bin/bpd.rs +++ b/src/bin/bpd.rs @@ -39,11 +39,13 @@ use redb::Database; use crate::opts::{Command, Opts}; -/// Exit status codes for different error conditions +// Exit status codes for different error conditions +// see also constants in `db.rs` const EXIT_PATH_ACCESS_ERROR: i32 = 1; const EXIT_DB_EXISTS_ERROR: i32 = 2; const EXIT_DIR_CREATE_ERROR: i32 = 3; const EXIT_DB_CREATE_ERROR: i32 = 4; +const EXIT_DB_OPEN_ERROR: i32 = 5; const EXIT_NETWORK_MISMATCH: i32 = 10; const EXIT_NO_NETWORK_INFO: i32 = 11; const EXIT_DB_NOT_FOUND: i32 = 12; diff --git a/src/db.rs b/src/db.rs index 0a2b66c..846ad78 100644 --- a/src/db.rs +++ b/src/db.rs @@ -36,6 +36,11 @@ use redb::{ Value, WriteTransaction, }; +// see also constants in `bin/bpd.rs` +const EXIT_DB_INIT_MAIN_TABLE: i32 = 6; +const EXIT_DB_INIT_TABLE: i32 = 7; +const EXIT_DB_INIT_ERROR: i32 = 8; + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Display)] #[display("#{0:010X}")] pub struct TxNo(u40); @@ -370,12 +375,12 @@ pub fn initialize_db_tables(db: &Database, network: Network) { // Commit the transaction if let Err(err) = tx.commit() { eprintln!("Failed to commit initial database transaction: {err}"); - exit(8); + exit(EXIT_DB_INIT_ERROR); } } Err(err) => { eprintln!("Failed to begin database transaction: {err}"); - exit(9); + exit(EXIT_DB_INIT_ERROR); } } } @@ -386,12 +391,12 @@ fn initialize_main_table(tx: &WriteTransaction, network: 
Network) { Ok(mut main_table) => { if let Err(err) = main_table.insert(REC_NETWORK, network.to_string().as_bytes()) { eprintln!("Failed to write network information to database: {err}"); - exit(5); + exit(EXIT_DB_INIT_MAIN_TABLE); } } Err(err) => { eprintln!("Failed to open main table in database: {err}"); - exit(6); + exit(EXIT_DB_INIT_MAIN_TABLE); } } } @@ -452,6 +457,6 @@ fn create_table( ) { if let Err(err) = tx.open_table(table_def) { eprintln!("Failed to create {} table: {err}", table_name); - exit(7); + exit(EXIT_DB_INIT_TABLE); } } From 2360202bf0ffd3ac229e4c638b95efd0bdfae1bc Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:48:45 +0200 Subject: [PATCH 15/21] chore: small comment fixes --- src/blocks.rs | 1 + src/config.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/blocks.rs b/src/blocks.rs index ceb10bd..a814814 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -46,6 +46,7 @@ use crate::db::{ const NAME: &str = "blockproc"; +// TODO: Make these configuration options // Constants for orphan block management const MAX_ORPHAN_BLOCKS: usize = 100; // Orphan blocks expire after 24 hours diff --git a/src/config.rs b/src/config.rs index 755bea2..bace821 100644 --- a/src/config.rs +++ b/src/config.rs @@ -36,6 +36,7 @@ pub struct Config { pub data_dir: PathBuf, /// Bitcoin network type (mainnet, testnet, etc.) + /// /// Each BP-Node instance is designed to work with a single network type. /// To work with multiple networks, create separate instances with different data directories. pub network: Network, From 5419f8cbec3b1e0ba7c6be6bab5adbf137313c4c Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:51:32 +0200 Subject: [PATCH 16/21] chore: fix some of the clippy lints --- src/blocks.rs | 17 ++++++----------- src/broker.rs | 6 +++--- src/importer.rs | 2 +- src/rpc.rs | 2 +- 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index a814814..5291c80 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -497,7 +497,7 @@ impl BlockProcessor { ); self.save_orphan_block(id, block_clone)?; - return Err(BlockProcError::OrphanBlock(e)); + Err(BlockProcError::OrphanBlock(e)) } Err(BlockProcError::PotentialFork(new_block_hash, height, existing_blockid)) => { // Handle potential fork case - conflict with existing block at same height @@ -517,11 +517,7 @@ impl BlockProcessor { debug_assert!(result.is_none()); - return Err(BlockProcError::PotentialFork( - new_block_hash, - height, - existing_blockid, - )); + Err(BlockProcError::PotentialFork(new_block_hash, height, existing_blockid)) } Err(BlockProcError::ForkChainExtension(block_hash, parent_hash)) => { // Handle fork chain extension case - parent block is part of a fork @@ -543,14 +539,14 @@ impl BlockProcessor { return Ok(txs_added); } - return Err(BlockProcError::ForkChainExtension(block_hash, parent_hash)); + Err(BlockProcError::ForkChainExtension(block_hash, parent_hash)) } Err(e) => { // Handle other errors if let Err(err) = db.abort() { log::warn!(target: NAME, "Unable to abort failed database transaction due to {err}"); }; - return Err(e); + Err(e) } } } @@ -978,7 +974,7 @@ impl BlockProcessor { // Check if orphan has expired if timestamp < expiry_threshold { - expired_orphans.push(orphan_hash.value().clone()); + expired_orphans.push(orphan_hash.value()); } } @@ -1024,8 +1020,7 @@ impl BlockProcessor { })?; // Store parent data for later processing - parents_to_scan - .push((parent_hash.value().clone(), orphans.value().to_vec())); +
parents_to_scan.push((parent_hash.value(), orphans.value().to_vec())); } // Now process parents without borrowing the table diff --git a/src/broker.rs b/src/broker.rs index 7d614ae..de705ef 100644 --- a/src/broker.rs +++ b/src/broker.rs @@ -72,7 +72,7 @@ impl Broker { const TIMEOUT: Option<Duration> = Some(Duration::from_secs(60 * 10)); log::info!("Starting database managing thread..."); - let indexdb = IndexDb::new(&conf.data_dir.join(PATH_INDEXDB))?; + let indexdb = IndexDb::new(conf.data_dir.join(PATH_INDEXDB))?; let db = UThread::new(indexdb, TIMEOUT); log::info!("Starting block importer thread..."); @@ -81,7 +81,7 @@ impl Broker { let listen = conf.import.iter().map(|addr| { NetAccept::bind(addr).unwrap_or_else(|err| panic!("unable to bind to {addr}: {err}")) }); - let importer = service::Runtime::new(conf.import[0].clone(), controller, listen) + let importer = service::Runtime::new(conf.import[0], controller, listen) .map_err(|err| BrokerError::Import(err.into()))?; log::info!("Starting RPC server thread..."); @@ -90,7 +90,7 @@ impl Broker { let listen = conf.rpc.iter().map(|addr| { NetAccept::bind(addr).unwrap_or_else(|err| panic!("unable to bind to {addr}: {err}")) }); - let rpc = service::Runtime::new(conf.rpc[0].clone(), controller, listen) + let rpc = service::Runtime::new(conf.rpc[0], controller, listen) .map_err(|err| BrokerError::Rpc(err.into()))?; log::info!("Launch completed successfully"); diff --git a/src/importer.rs b/src/importer.rs index dc1b2eb..66a4b66 100644 --- a/src/importer.rs +++ b/src/importer.rs @@ -120,7 +120,7 @@ impl ServiceController for BlockImporter let client = self.providers.remove(&addr).unwrap_or_else(|| { panic!("Block provider at {addr} got disconnected but not found in providers list"); }); - log::warn!(target: NAME, "Block provider at {addr} got disconnected due to {reason} ({})", client.agent.map(|a| a.to_string()).unwrap_or(none!())); + log::warn!(target: NAME, "Block provider at {addr} got disconnected due to {reason} ({})", client.agent.map(|a| a.to_string()).unwrap_or_default()); } fn on_command(&mut self, cmd: ImporterCmd) { diff --git a/src/rpc.rs b/src/rpc.rs index e5e9a84..adf830e 100644 --- a/src/rpc.rs +++ b/src/rpc.rs @@ -110,7 +110,7 @@ impl ServiceController for RpcController let client = self.clients.remove(&remote).unwrap_or_else(|| { panic!("Client at {remote} got disconnected but not found in providers list"); }); - log::warn!(target: NAME, "Client at {remote} got disconnected due to {reason} ({})", client.agent.map(|a| a.to_string()).unwrap_or(none!())); + log::warn!(target: NAME, "Client at {remote} got disconnected due to {reason} ({})", client.agent.map(|a| a.to_string()).unwrap_or_default()); self.broker .send(BrokerRpcMsg::UntrackAll(remote)) .expect("Unable to communicate to broker"); From 149e4b0671bea20306df392c8847ef1987dae682 Mon Sep 17 00:00:00 2001 From: Dr Maxim Orlovsky Date: Thu, 24 Apr 2025 12:53:50 +0200 Subject: [PATCH 17/21] ci: fix no-defaults build --- client/src/client.rs | 6 +++--- client/src/exporter.rs | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/client/src/client.rs b/client/src/client.rs index 2bb9223..f04cb6f 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -64,7 +64,7 @@ impl ConnectionDelegate for Delegate { TcpStream::connect(remote).unwrap_or_else(|err| { #[cfg(feature = "log")] log::error!("Unable to connect BP Node {remote} due to {err}"); - eprintln!("Unable to connect BP Node {remote}"); + eprintln!("Unable to connect BP Node {remote} due to {err}"); exit(1); }) }
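The gating pattern this patch rolls out is worth a short aside: with `--no-default-features` the `log` dependency may be absent, so every call into `log::` is fenced with `#[cfg(feature = "log")]`. A parameter referenced only inside such a fence counts as unused when the feature is off, which is why the next hunk renames `err` to `_err` while still interpolating `{_err}` in the log macro. A compilable sketch of the idiom (the function and message are illustrative, not from the codebase):

    fn report_disconnect(remote: &str, _err: std::io::Error) {
        // Only compiled when the crate is built with the `log` feature;
        // the underscore keeps the parameter warning-free without it.
        #[cfg(feature = "log")]
        log::error!("disconnected from {remote} due to {_err}");
        // Always-compiled fallback so the failure is never silent.
        eprintln!("disconnected from {remote}");
    }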
@@ -74,9 +74,9 @@ impl ConnectionDelegate for Delegate { log::info!("connection to the server is established"); } - fn on_disconnect(&mut self, err: Error, _attempt: usize) -> OnDisconnect { + fn on_disconnect(&mut self, _err: Error, _attempt: usize) -> OnDisconnect { #[cfg(feature = "log")] - log::error!("disconnected due to {err}"); + log::error!("disconnected due to {_err}"); OnDisconnect::Terminate } diff --git a/client/src/exporter.rs b/client/src/exporter.rs index fd62d2f..6093e0f 100644 --- a/client/src/exporter.rs +++ b/client/src/exporter.rs @@ -57,22 +57,27 @@ impl ConnectionDelegate for BlockExporter { fn connect(&mut self, remote: &RemoteAddr) -> Session { TcpStream::connect(remote).unwrap_or_else(|err| { + #[cfg(feature = "log")] log::error!(target: NAME, "Unable to connect BP Node {remote} due to {err}"); + #[cfg(feature = "log")] log::warn!(target: NAME, "Stopping RPC import thread"); exit(1); }) } fn on_established(&mut self, remote: SocketAddr, _attempt: usize) { + #[cfg(feature = "log")] log::info!(target: NAME, "Connected to BP Node {remote}, sending `hello(...)`"); } fn on_disconnect(&mut self, err: std::io::Error, _attempt: usize) -> OnDisconnect { + #[cfg(feature = "log")] log::error!(target: NAME, "BP Node got disconnected due to {err}"); exit(1) } fn on_io_error(&mut self, err: reactor::Error>) { + #[cfg(feature = "log")] log::error!(target: NAME, "I/O error in communicating with BP Node: {err}"); self.disconnect(); } @@ -85,14 +90,17 @@ impl ClientDelegate for BlockExporter { match msg { ImporterReply::Filters(filters) => { if self.filters_received { + #[cfg(feature = "log")] log::warn!(target: NAME, "Received duplicate filters"); } else { + #[cfg(feature = "log")] log::info!(target: NAME, "Received filters"); } self.filters = filters; self.filters_received = true; } ImporterReply::Error(failure) => { + #[cfg(feature = "log")] log::error!(target: NAME, "Received error from BP Node: {failure}"); self.disconnect(); } @@ -100,6 +108,7 @@ impl ClientDelegate for BlockExporter { } fn on_reply_unparsable(&mut self, err: ::Error) { + #[cfg(feature = "log")] log::error!("Invalid message from BP Node: {err}"); } } From 238a41f130a9f3aa4a60018536de4ffb17eab413 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 25 Apr 2025 11:05:56 +0800 Subject: [PATCH 18/21] fix(db): set transaction numbering to start from ONE Signed-off-by: will-bitlightlabs --- src/blocks.rs | 1 - src/db.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 5291c80..7c4696e 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -294,7 +294,6 @@ impl BlockProcessor { // otherwise use the existing txno. 
This is mainly to avoid issues after block // reorganization, where the same txid in different blocks could be // assigned different txno values, leading to incorrect processing - let txid = tx.txid(); let txno = txids_table .get(txid.to_byte_array()) diff --git a/src/db.rs b/src/db.rs index 846ad78..0a678d9 100644 --- a/src/db.rs +++ b/src/db.rs @@ -53,7 +53,7 @@ pub type BlockId = Id; pub type ForkId = Id; impl TxNo { - pub fn start() -> Self { TxNo(u40::ZERO) } + pub fn start() -> Self { TxNo(u40::ONE) } pub fn inc_assign(&mut self) { self.0 += u40::ONE } From c74c36ac92ec99e8c7ce003864f8a5811f46a7e3 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 25 Apr 2025 12:16:56 +0800 Subject: [PATCH 19/21] refactor: optimize database table access and reduce repeated opening Signed-off-by: will-bitlightlabs --- src/blocks.rs | 265 ++++++++++++++++++++++++++------------------------ 1 file changed, 138 insertions(+), 127 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 7c4696e..43807af 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -238,18 +238,66 @@ impl BlockProcessor { let blockid = self.get_next_block_id(&db)?; - // Store block header - let mut table = db + // Open tables needed in the loop to avoid repeated opening/closing which affects + // performance + let mut blocks_table = db .open_table(TABLE_BLKS) .map_err(BlockProcError::BlockTable)?; - table - .insert(blockid, DbBlockHeader::from(block.header)) - .map_err(BlockProcError::BlockStorage)?; - // Map block hash to block ID let mut blockids_table = db .open_table(TABLE_BLOCKIDS) .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(BlockProcError::HeightsTable)?; + + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + + let mut txids_table = db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?; + + let mut tx_blocks_table = db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; + + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + + let mut inputs_table = db + .open_table(TABLE_INPUTS) + .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?; + + let mut outs_table = db + .open_table(TABLE_OUTS) + .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?; + + let mut spks_table = db + .open_table(TABLE_SPKS) + .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?; + + let mut txes_table = db + .open_table(TABLE_TXES) + .map_err(BlockProcError::TxesTable)?; + + let mut block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + + let mut block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + + // Store block header + blocks_table + .insert(blockid, DbBlockHeader::from(block.header)) + .map_err(BlockProcError::BlockStorage)?; + + // Map block hash to block ID blockids_table .insert(id.to_byte_array(), blockid) .map_err(|e| BlockProcError::Custom(format!("Block ID storage error: {}", e)))?; @@ -262,17 +310,11 @@ impl BlockProcessor { blockid ); - let mut heights_table = db - .open_table(TABLE_HEIGHTS) - .map_err(BlockProcError::HeightsTable)?; 
heights_table .insert(height, blockid) .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; // Also update the reverse mapping (blockid -> height) - let mut block_heights_table = db - .open_table(TABLE_BLOCK_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; block_heights_table.insert(blockid, height).map_err(|e| { BlockProcError::Custom(format!("Block height storage error: {}", e)) })?; @@ -285,11 +327,6 @@ impl BlockProcessor { // Process transactions in the block for tx in block.transactions { - // Store transaction ID to transaction number mapping - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; - // Get txno from TABLE_TXIDS using txid. If it doesn't exist, use txno-counter, // otherwise use the existing txno. This is mainly to avoid issues after block // reorganization, where the same txid in different blocks could be @@ -312,9 +349,6 @@ impl BlockProcessor { .map_err(BlockProcError::TxidStorage)?; // Associate transaction with block ID - let mut tx_blocks_table = db - .open_table(TABLE_TX_BLOCKS) - .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; tx_blocks_table.insert(txno, blockid).map_err(|e| { BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) })?; @@ -332,9 +366,6 @@ impl BlockProcessor { .map(|v| v.value()) { // Mark UTXO as spent - let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { - BlockProcError::Custom(format!("UTXOs table error: {}", e)) - })?; utxos_table .remove(&(prev_txno, prev_vout.into_u32())) .map_err(|e| { @@ -345,9 +376,6 @@ impl BlockProcessor { block_spends.push((prev_txno, prev_vout.into_u32())); // Record input-output mapping - let mut inputs_table = db.open_table(TABLE_INPUTS).map_err(|e| { - BlockProcError::Custom(format!("Inputs table error: {}", e)) - })?; inputs_table .insert((txno, vin_idx as u32), (prev_txno, prev_vout.into_u32())) .map_err(|e| { @@ -355,9 +383,6 @@ impl BlockProcessor { })?; // Update spending relationships - let mut outs_table = db.open_table(TABLE_OUTS).map_err(|e| { - BlockProcError::Custom(format!("Outs table error: {}", e)) - })?; let mut spending_txs = outs_table .get(prev_txno) .map_err(|e| { @@ -376,9 +401,6 @@ impl BlockProcessor { // Process transaction outputs for (vout_idx, output) in tx.outputs.iter().enumerate() { // Add new UTXO - let mut utxos_table = db - .open_table(TABLE_UTXOS) - .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; utxos_table .insert((txno, vout_idx as u32), ()) .map_err(|e| { @@ -388,9 +410,6 @@ impl BlockProcessor { // Index script pubkey let script = &output.script_pubkey; if !script.is_empty() { - let mut spks_table = db.open_table(TABLE_SPKS).map_err(|e| { - BlockProcError::Custom(format!("SPKs table error: {}", e)) - })?; let mut txnos = spks_table .get(script.as_slice()) .map_err(|e| { @@ -406,9 +425,6 @@ impl BlockProcessor { } // Store complete transaction - let mut txes_table = db - .open_table(TABLE_TXES) - .map_err(BlockProcError::TxesTable)?; txes_table .insert(txno, DbTx::from(tx)) .map_err(BlockProcError::TxesStorage)?; @@ -430,17 +446,11 @@ impl BlockProcessor { } // Store all transaction numbers in this block - let mut block_txs_table = db - .open_table(TABLE_BLOCK_TXS) - .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; block_txs_table .insert(blockid, block_txs) .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; // Store 
UTXOs spent in this block - let mut block_spends_table = db - .open_table(TABLE_BLOCK_SPENDS) - .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; block_spends_table .insert(blockid, block_spends) .map_err(|e| { @@ -1534,12 +1544,12 @@ ) -> Result<Vec<(u32, BlockId)>, BlockProcError> { let mut blocks_to_rollback = Vec::new(); + let heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + // We need to roll back from highest to lowest height for height in (start_height..=end_height).rev() { - let heights_table = db - .open_table(TABLE_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - if let Some(block_id_record) = heights_table .get(height) .map_err(|e| BlockProcError::Custom(format!("Heights lookup error: {}", e)))? @@ -1585,6 +1595,14 @@ // Collect blocks (from high to low) let mut temp_blocks = Vec::new(); + let blks_table = db + .open_table(TABLE_BLKS) + .map_err(|e| BlockProcError::Custom(format!("Blocks table error: {}", e)))?; + + let blockids_table = db + .open_table(TABLE_BLOCKIDS) + .map_err(|e| BlockProcError::Custom(format!("Block IDs table error: {}", e)))?; + while current_height >= start_height { temp_blocks.push((current_height, current_block_id)); @@ -1593,10 +1611,6 @@ } // Find the parent of this block - let blks_table = db - .open_table(TABLE_BLKS) - .map_err(|e| BlockProcError::Custom(format!("Blocks table error: {}", e)))?; - let block_header = match blks_table .get(current_block_id) .map_err(|e| BlockProcError::Custom(format!("Block lookup error: {}", e)))? @@ -1613,10 +1627,6 @@ let prev_hash = block_header.as_ref().prev_block_hash; // Find the block ID for this hash - let blockids_table = db - .open_table(TABLE_BLOCKIDS) - .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? - let prev_block_id = match blockids_table .get(prev_hash.to_byte_array()) .map_err(|e| BlockProcError::Custom(format!("Block ID lookup error: {}", e)))? @@ -1662,6 +1672,30 @@ let mut total_utxos_restored = 0; let mut total_utxos_removed = 0; + let block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + + let block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + + let txes_table = db + .open_table(TABLE_TXES) + .map_err(|e| BlockProcError::Custom(format!("Txes table error: {}", e)))?; + + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + // Iterate through blocks to roll back (should be in descending height order) for &(height, block_id) in blocks { log::info!( @@ -1676,10 +1710,6 @@ block_id ); let mut block_utxos_restored = 0; let mut block_utxos_removed = 0; let mut block_txs_removed = 0; // 1.
Restore UTXOs spent in this block - let block_spends_table = db - .open_table(TABLE_BLOCK_SPENDS) - .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; - if let Some(spends_record) = block_spends_table .get(block_id) .map_err(|e| BlockProcError::Custom(format!("Block spends lookup error: {}", e)))? @@ -1689,10 +1719,6 @@ impl BlockProcessor { total_utxos_restored += block_utxos_restored; // Restore each spent UTXO - let mut utxos_table = db - .open_table(TABLE_UTXOS) - .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; - for (txno, vout) in spends { utxos_table.insert((txno, vout), ()).map_err(|e| { BlockProcError::Custom(format!("UTXO restoration error: {}", e)) @@ -1708,10 +1734,6 @@ impl BlockProcessor { } // 2. Find all transactions in this block - let block_txs_table = db - .open_table(TABLE_BLOCK_TXS) - .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; - if let Some(txs_record) = block_txs_table .get(block_id) .map_err(|e| BlockProcError::Custom(format!("Block-txs lookup error: {}", e)))? @@ -1723,10 +1745,6 @@ impl BlockProcessor { // For each transaction for txno in txs { // 3. Remove UTXOs created by this transaction - let txes_table = db - .open_table(TABLE_TXES) - .map_err(|e| BlockProcError::Custom(format!("Txes table error: {}", e)))?; - if let Some(tx_record) = txes_table .get(txno) .map_err(|e| BlockProcError::Custom(format!("Tx lookup error: {}", e)))? @@ -1736,10 +1754,6 @@ impl BlockProcessor { block_utxos_removed += num_outputs; total_utxos_removed += num_outputs; - let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { - BlockProcError::Custom(format!("UTXOs table error: {}", e)) - })?; - for vout in 0..num_outputs { utxos_table.remove(&(txno, vout as u32)).map_err(|e| { BlockProcError::Custom(format!("UTXO removal error: {}", e)) @@ -1757,18 +1771,10 @@ impl BlockProcessor { } // 4. 
Remove this block from the heights tables - let mut heights_table = db - .open_table(TABLE_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - heights_table .remove(height) .map_err(|e| BlockProcError::Custom(format!("Heights removal error: {}", e)))?; - let mut block_heights_table = db - .open_table(TABLE_BLOCK_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; - block_heights_table.remove(block_id).map_err(|e| { BlockProcError::Custom(format!("Block height removal error: {}", e)) })?; @@ -1833,6 +1839,54 @@ impl BlockProcessor { let mut total_utxos_added = 0; let mut total_utxos_spent = 0; + let fork_blocks_table = db + .open_table(TABLE_FORK_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Fork blocks table error: {}", e)))?; + + let mut txids_table = db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?; + + let mut txes_table = db + .open_table(TABLE_TXES) + .map_err(BlockProcError::TxesTable)?; + + let mut tx_blocks_table = db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; + + let mut utxos_table = db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; + + let mut inputs_table = db + .open_table(TABLE_INPUTS) + .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?; + + let mut outs_table = db + .open_table(TABLE_OUTS) + .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?; + + let mut spks_table = db + .open_table(TABLE_SPKS) + .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?; + + let mut block_txs_table = db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; + + let mut block_spends_table = db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; + + let mut heights_table = db + .open_table(TABLE_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; + + let mut block_heights_table = db + .open_table(TABLE_BLOCK_HEIGHTS) + .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + // Iterate through blocks to apply (should be in ascending height order) for &(height, block_id) in blocks { log::info!( @@ -1843,10 +1897,6 @@ impl BlockProcessor { ); // Get the complete block data from fork blocks table - let fork_blocks_table = db - .open_table(TABLE_FORK_BLOCKS) - .map_err(|e| BlockProcError::Custom(format!("Fork blocks table error: {}", e)))?; - let block_data = fork_blocks_table .get(block_id) .map_err(|e| BlockProcError::Custom(format!("Fork block lookup error: {}", e)))? @@ -1879,10 +1929,6 @@ impl BlockProcessor { // For fork blocks, txids may already be in the database with assigned txno // Check if this txid already exists - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; - let existing_txno = txids_table .get(txid.to_byte_array()) .map_err(BlockProcError::TxidLookup)? 
@@ -1907,9 +1953,6 @@ impl BlockProcessor { .map_err(BlockProcError::TxidStorage)?; // Store the transaction data - let mut txes_table = db - .open_table(TABLE_TXES) - .map_err(BlockProcError::TxesTable)?; txes_table .insert(tx_txno, DbTx::from(tx.clone())) .map_err(BlockProcError::TxesStorage)?; @@ -1918,9 +1961,6 @@ impl BlockProcessor { } // Associate transaction with block ID (update even if transaction existed) - let mut tx_blocks_table = db - .open_table(TABLE_TX_BLOCKS) - .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; tx_blocks_table.insert(tx_txno, block_id).map_err(|e| { BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) })?; @@ -1938,9 +1978,6 @@ impl BlockProcessor { .map(|v| v.value()) { // Mark UTXO as spent - let mut utxos_table = db.open_table(TABLE_UTXOS).map_err(|e| { - BlockProcError::Custom(format!("UTXOs table error: {}", e)) - })?; utxos_table .remove(&(prev_txno, prev_vout.into_u32())) .map_err(|e| { @@ -1953,9 +1990,6 @@ impl BlockProcessor { block_spends.push((prev_txno, prev_vout.into_u32())); // Record input-output mapping - let mut inputs_table = db.open_table(TABLE_INPUTS).map_err(|e| { - BlockProcError::Custom(format!("Inputs table error: {}", e)) - })?; inputs_table .insert( (tx_txno, vin_idx as u32), @@ -1966,9 +2000,6 @@ impl BlockProcessor { })?; // Update spending relationships - let mut outs_table = db.open_table(TABLE_OUTS).map_err(|e| { - BlockProcError::Custom(format!("Outs table error: {}", e)) - })?; let mut spending_txs = outs_table .get(prev_txno) .map_err(|e| { @@ -1991,9 +2022,6 @@ impl BlockProcessor { // Process transaction outputs for (vout_idx, output) in tx.outputs.iter().enumerate() { // Add new UTXO - let mut utxos_table = db - .open_table(TABLE_UTXOS) - .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; utxos_table .insert((tx_txno, vout_idx as u32), ()) .map_err(|e| { @@ -2005,9 +2033,6 @@ impl BlockProcessor { // Index script pubkey let script = &output.script_pubkey; if !script.is_empty() { - let mut spks_table = db.open_table(TABLE_SPKS).map_err(|e| { - BlockProcError::Custom(format!("SPKs table error: {}", e)) - })?; let mut txnos = spks_table .get(script.as_slice()) .map_err(|e| { @@ -2028,17 +2053,11 @@ impl BlockProcessor { } // Store all transaction numbers in this block - let mut block_txs_table = db - .open_table(TABLE_BLOCK_TXS) - .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; block_txs_table .insert(block_id, block_txs) .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; // Store UTXOs spent in this block - let mut block_spends_table = db - .open_table(TABLE_BLOCK_SPENDS) - .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; block_spends_table .insert(block_id, block_spends) .map_err(|e| { @@ -2046,18 +2065,10 @@ impl BlockProcessor { })?; // Update the heights tables - let mut heights_table = db - .open_table(TABLE_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; - heights_table .insert(height, block_id) .map_err(|e| BlockProcError::Custom(format!("Heights storage error: {}", e)))?; - let mut block_heights_table = db - .open_table(TABLE_BLOCK_HEIGHTS) - .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; - block_heights_table.insert(block_id, height).map_err(|e| { BlockProcError::Custom(format!("Block height storage error: {}", e)) })?; From ddb6ba82ce83a84c0fb81d62994cfdf53a946aba Mon Sep 
17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 25 Apr 2025 15:43:32 +0800 Subject: [PATCH 20/21] feat: enhance block rollback process with comprehensive cleanup Signed-off-by: will-bitlightlabs --- src/blocks.rs | 145 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 136 insertions(+), 9 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 43807af..8195d07 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -1671,6 +1671,9 @@ impl BlockProcessor { let mut total_txs_removed = 0; let mut total_utxos_restored = 0; let mut total_utxos_removed = 0; + let mut total_spk_entries_cleaned = 0; + let mut total_inputs_cleaned = 0; + let mut total_outs_refs_cleaned = 0; let block_spends_table = db .open_table(TABLE_BLOCK_SPENDS) @@ -1696,6 +1699,26 @@ impl BlockProcessor { .open_table(TABLE_BLOCK_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; + // Open the SPKs table for script pubkey cleanup during rollback + let mut spks_table = db + .open_table(TABLE_SPKS) + .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?; + + // Open the inputs table to clean up input references + let mut inputs_table = db + .open_table(TABLE_INPUTS) + .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?; + + // Open the outs table to clean up spending relationships + let mut outs_table = db + .open_table(TABLE_OUTS) + .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?; + + // Open tx_blocks table to clean up transaction-block associations + let mut tx_blocks_table = db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("TX-Blocks table error: {}", e)))?; + // Iterate through blocks to roll back (should be in descending height order) for &(height, block_id) in blocks { log::info!( @@ -1708,6 +1731,9 @@ impl BlockProcessor { let mut block_utxos_restored = 0; let mut block_utxos_removed = 0; let mut block_txs_removed = 0; + let mut block_spk_entries_cleaned = 0; + let mut block_inputs_cleaned = 0; + let mut block_outs_refs_cleaned = 0; // 1. Restore UTXOs spent in this block if let Some(spends_record) = block_spends_table @@ -1750,12 +1776,17 @@ impl BlockProcessor { .map_err(|e| BlockProcError::Custom(format!("Tx lookup error: {}", e)))? { let tx = tx_record.value(); - let num_outputs = tx.as_ref().outputs.len(); + let outputs = tx.as_ref().outputs.as_slice(); + let num_outputs = outputs.len(); block_utxos_removed += num_outputs; total_utxos_removed += num_outputs; - for vout in 0..num_outputs { - utxos_table.remove(&(txno, vout as u32)).map_err(|e| { + // Get the number of inputs for this transaction + let inputs_count = tx.as_ref().inputs.len(); + + for (vout_idx, output) in outputs.iter().enumerate() { + // Remove UTXOs + utxos_table.remove(&(txno, vout_idx as u32)).map_err(|e| { BlockProcError::Custom(format!("UTXO removal error: {}", e)) })?; @@ -1763,14 +1794,101 @@ impl BlockProcessor { target: NAME, "Removed UTXO: txno={}, vout={}", txno, - vout + vout_idx + ); + + // 4. Clean up script pubkey index for this transaction output + let script = &output.script_pubkey; + if !script.is_empty() { + let txnos = spks_table + .get(script.as_slice()) + .map_err(|e| { + BlockProcError::Custom(format!("SPKs lookup error: {}", e)) + })? 
+ .map(|t| t.value().to_vec()); + + if let Some(mut txnos) = txnos { + // Remove this transaction from the list + if let Some(pos) = txnos.iter().position(|&t| t == txno) { + txnos.remove(pos); + block_spk_entries_cleaned += 1; + + // If the list is not empty, update it; otherwise, remove + // the entry + if !txnos.is_empty() { + spks_table.insert(script.as_slice(), txnos).map_err( + |e| { + BlockProcError::Custom(format!( + "SPKs update error: {}", + e + )) + }, + )?; + } else { + spks_table.remove(script.as_slice()).map_err(|e| { + BlockProcError::Custom(format!( + "SPKs removal error: {}", + e + )) + })?; + } + + log::debug!( + target: NAME, + "Cleaned up SPK index for txno={}, vout={}", + txno, + vout_idx + ); + } + } + } + } + + // 5. Clean up inputs table for this transaction + for input_idx in 0..inputs_count { + if inputs_table + .remove(&(txno, input_idx as u32)) + .map_err(|e| { + BlockProcError::Custom(format!("Inputs removal error: {}", e)) + })? + .is_some() + { + block_inputs_cleaned += 1; + log::debug!( + target: NAME, + "Removed input reference: txno={}, input_idx={}", + txno, + input_idx + ); + } + } + total_inputs_cleaned += block_inputs_cleaned; + + // 6. Clean up this transaction from spending relationships + if outs_table + .remove(txno) + .map_err(|e| { + BlockProcError::Custom(format!("Outs lookup error: {}", e)) + })? + .is_some() + { + block_outs_refs_cleaned += 1; + log::debug!( + target: NAME, + "Removed spending relationship for txno={}", + txno ); } } + + // 7. Remove transaction-block association + tx_blocks_table.remove(txno).map_err(|e| { + BlockProcError::Custom(format!("TX-Blocks removal error: {}", e)) + })?; } } - // 4. Remove this block from the heights tables + // 8. Remove this block from the heights tables heights_table .remove(height) .map_err(|e| BlockProcError::Custom(format!("Heights removal error: {}", e)))?; @@ -1786,23 +1904,32 @@ impl BlockProcessor { block_id ); + total_spk_entries_cleaned += block_spk_entries_cleaned; + total_outs_refs_cleaned += block_outs_refs_cleaned; + log::info!( target: NAME, - "Block rollback stats for height {}: removed {} transactions, restored {} UTXOs, removed {} UTXOs", + "Block rollback stats for height {}: removed {} transactions, restored {} UTXOs, removed {} UTXOs, cleaned {} SPK entries, {} input refs, {} output refs", height, block_txs_removed, block_utxos_restored, - block_utxos_removed + block_utxos_removed, + block_spk_entries_cleaned, + block_inputs_cleaned, + block_outs_refs_cleaned ); } log::info!( target: NAME, - "Successfully rolled back {} blocks: removed {} transactions, restored {} UTXOs, removed {} UTXOs", + "Successfully rolled back {} blocks: removed {} transactions, restored {} UTXOs, removed {} UTXOs, cleaned {} SPK entries, {} input refs, {} output refs", blocks.len(), total_txs_removed, total_utxos_restored, - total_utxos_removed + total_utxos_removed, + total_spk_entries_cleaned, + total_inputs_cleaned, + total_outs_refs_cleaned ); Ok(()) From ec2e560ada430148730c925d2533172502084418 Mon Sep 17 00:00:00 2001 From: will-bitlightlabs Date: Fri, 25 Apr 2025 16:47:32 +0800 Subject: [PATCH 21/21] feat: implement TxTablesContext for transaction processing optimization Signed-off-by: will-bitlightlabs --- src/blocks.rs | 675 ++++++++++++++++++++++++-------------------------- 1 file changed, 330 insertions(+), 345 deletions(-) diff --git a/src/blocks.rs b/src/blocks.rs index 8195d07..ce3828b 100644 --- a/src/blocks.rs +++ b/src/blocks.rs @@ -52,6 +52,271 @@ const MAX_ORPHAN_BLOCKS: usize = 100; 
// Orphan blocks expire after 24 hours const ORPHAN_EXPIRY_HOURS: u64 = 24; +/// Table context for transaction processing +/// +/// This structure holds references to all database tables needed for transaction processing. +/// It helps avoid repeated opening of the same tables and provides a unified interface for +/// transaction processing logic that can be shared between different block processing functions. +struct TxTablesContext<'a> { + /// Maps transaction IDs to transaction numbers + txids_table: redb::Table<'a, [u8; 32], TxNo>, + + /// Maps transaction numbers to block IDs + tx_blocks_table: redb::Table<'a, TxNo, BlockId>, + + /// Tracks unspent transaction outputs (UTXOs) + utxos_table: redb::Table<'a, (TxNo, u32), ()>, + + /// Maps transaction inputs to the outputs they spend + inputs_table: redb::Table<'a, (TxNo, u32), (TxNo, u32)>, + + /// Maps transaction numbers to transactions that spend their outputs + outs_table: redb::Table<'a, TxNo, Vec<TxNo>>, + + /// Maps script public keys to transactions containing them + spks_table: redb::Table<'a, &'static [u8], Vec<TxNo>>, + + /// Stores complete transaction data + txes_table: redb::Table<'a, TxNo, DbTx>, + + /// Maps block IDs to transactions they contain + block_txs_table: redb::Table<'a, BlockId, Vec<TxNo>>, + + /// Records UTXOs spent in each block (for rollback purposes) + block_spends_table: redb::Table<'a, BlockId, Vec<(TxNo, u32)>>, +} + +impl<'a> TxTablesContext<'a> { + /// Creates a new transaction tables context from a database transaction + fn new(db: &'a WriteTransaction) -> Result<Self, BlockProcError> { + Ok(Self { + txids_table: db + .open_table(TABLE_TXIDS) + .map_err(BlockProcError::TxidTable)?, + + tx_blocks_table: db + .open_table(TABLE_TX_BLOCKS) + .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?, + + utxos_table: db + .open_table(TABLE_UTXOS) + .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?, + + inputs_table: db + .open_table(TABLE_INPUTS) + .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?, + + outs_table: db + .open_table(TABLE_OUTS) + .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?, + + spks_table: db + .open_table(TABLE_SPKS) + .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?, + + txes_table: db + .open_table(TABLE_TXES) + .map_err(BlockProcError::TxesTable)?, + + block_txs_table: db + .open_table(TABLE_BLOCK_TXS) + .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?, + + block_spends_table: db + .open_table(TABLE_BLOCK_SPENDS) + .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?, + }) + } + + /// Process a single transaction, handling all database operations + /// + /// This method abstracts the common logic for processing transactions in both + /// normal block processing and chain reorganization.
+ ///
+ /// # Parameters
+ /// * `tx` - The transaction to process
+ /// * `block_id` - ID of the block containing the transaction
+ /// * `txno_counter` - Current transaction number counter (incremented when a new number is assigned)
+ /// * `block_txs` - Vector collecting the transaction numbers of this block
+ /// * `block_spends` - Vector collecting the UTXOs spent in this block
+ ///
+ /// # Returns
+ /// * `Result<(TxNo, bool), BlockProcError>` - Transaction number and whether it's a new
+ /// transaction
+ fn process_transaction(
+ &mut self,
+ tx: &bpwallet::Tx,
+ block_id: BlockId,
+ txno_counter: &mut TxNo,
+ block_txs: &mut Vec<TxNo>,
+ block_spends: &mut Vec<(TxNo, u32)>,
+ ) -> Result<(TxNo, bool), BlockProcError> {
+ // Calculate transaction ID
+ let txid = tx.txid();
+ let txid_bytes = txid.to_byte_array();
+
+ // Check if this txid already exists
+ let existing_txno = self
+ .txids_table
+ .get(txid_bytes)
+ .map_err(BlockProcError::TxidLookup)?
+ .map(|v| v.value());
+
+ // Get or assign transaction number
+ let txno = if let Some(existing) = existing_txno {
+ existing // Use existing transaction number
+ } else {
+ // Assign new transaction number
+ txno_counter.inc_assign();
+ *txno_counter
+ };
+
+ // Add transaction to the list for this block
+ block_txs.push(txno);
+
+ // Store transaction ID mapping (or update if needed)
+ self.txids_table
+ .insert(txid_bytes, txno)
+ .map_err(BlockProcError::TxidStorage)?;
+
+ // Associate transaction with block ID
+ self.tx_blocks_table
+ .insert(txno, block_id)
+ .map_err(|e| BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)))?;
+
+ // Process transaction inputs
+ for (vin_idx, input) in tx.inputs.iter().enumerate() {
+ if !input.prev_output.is_coinbase() {
+ let prev_txid = input.prev_output.txid;
+ let prev_vout = input.prev_output.vout;
+
+ // Look up previous transaction number
+ if let Some(prev_txno) = self
+ .txids_table
+ .get(prev_txid.to_byte_array())
+ .map_err(BlockProcError::TxidLookup)?
+ .map(|v| v.value())
+ {
+ // Mark UTXO as spent
+ self.utxos_table
+ .remove(&(prev_txno, prev_vout.into_u32()))
+ .map_err(|e| {
+ BlockProcError::Custom(format!("UTXOs removal error: {}", e))
+ })?;
+
+ // Record UTXO spent in this block
+ block_spends.push((prev_txno, prev_vout.into_u32()));
+
+ // Record input-output mapping
+ self.inputs_table
+ .insert((txno, vin_idx as u32), (prev_txno, prev_vout.into_u32()))
+ .map_err(|e| {
+ BlockProcError::Custom(format!("Inputs storage error: {}", e))
+ })?;
+
+ // Update spending relationships
+ let mut spending_txs = self
+ .outs_table
+ .get(prev_txno)
+ .map_err(|e| BlockProcError::Custom(format!("Outs lookup error: {}", e)))?
+ .map(|v| v.value().to_vec())
+ .unwrap_or_default();
+
+ // Avoid duplicate entries in fork case
+ if !spending_txs.contains(&txno) {
+ spending_txs.push(txno);
+ self.outs_table
+ .insert(prev_txno, spending_txs)
+ .map_err(|e| {
+ BlockProcError::Custom(format!("Outs update error: {}", e))
+ })?;
+ }
+ }
+ }
+ }
+
+ // Process transaction outputs
+ for (vout_idx, output) in tx.outputs.iter().enumerate() {
+ // Add new UTXO
+ self.utxos_table
+ .insert((txno, vout_idx as u32), ())
+ .map_err(|e| BlockProcError::Custom(format!("UTXOs storage error: {}", e)))?;
+
+ // Index script pubkey
+ let script = &output.script_pubkey;
+ if !script.is_empty() {
+ let mut txnos = self
+ .spks_table
+ .get(script.as_slice())
+ .map_err(|e| BlockProcError::Custom(format!("SPKs lookup error: {}", e)))?
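+ // A missing SPK entry simply means no transaction has used this
+ // script yet, hence the unwrap_or_default() in the next step of
+ // this chain.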
+ .map(|v| v.value().to_vec())
+ .unwrap_or_default();
+
+ // Avoid duplicate entries in fork case
+ if !txnos.contains(&txno) {
+ txnos.push(txno);
+ self.spks_table
+ .insert(script.as_slice(), txnos)
+ .map_err(|e| BlockProcError::Custom(format!("SPKs update error: {}", e)))?;
+ }
+ }
+ }
+
+ // Store complete transaction data if it's new
+ if existing_txno.is_none() {
+ self.txes_table
+ .insert(txno, DbTx::from(tx.clone()))
+ .map_err(BlockProcError::TxesStorage)?;
+ }
+
+ // Return transaction number and whether it was newly added
+ Ok((txno, existing_txno.is_none()))
+ }
+
+ /// Finalize block processing by storing block transaction and spend data
+ ///
+ /// This method handles the common post-processing steps after all transactions
+ /// in a block have been processed.
+ ///
+ /// # Parameters
+ /// * `block_id` - ID of the processed block
+ /// * `block_txs` - Transaction numbers contained in this block
+ /// * `block_spends` - UTXOs spent in this block
+ /// * `txno_counter` - Current transaction number counter to persist in the main table
+ ///
+ /// # Returns
+ /// * `Result<(), BlockProcError>` - Success or error
+ fn finalize_block_processing(
+ &mut self,
+ db: &WriteTransaction,
+ block_id: BlockId,
+ block_txs: Vec<TxNo>,
+ block_spends: Vec<(TxNo, u32)>,
+ txno_counter: TxNo,
+ ) -> Result<(), BlockProcError> {
+ // Store all transaction numbers in this block
+ self.block_txs_table
+ .insert(block_id, block_txs)
+ .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?;
+
+ // Store UTXOs spent in this block
+ self.block_spends_table
+ .insert(block_id, block_spends)
+ .map_err(|e| BlockProcError::Custom(format!("Block spends storage error: {}", e)))?;
+
+ // Update global counters
+ let mut main = db
+ .open_table(TABLE_MAIN)
+ .map_err(BlockProcError::MainTable)?;
+
+ // Update transaction counter
+ main.insert(REC_TXNO, txno_counter.to_byte_array().as_slice())
+ .map_err(BlockProcError::TxNoUpdate)?;
+
+ Ok(())
+ }
+}
+
 pub struct BlockProcessor {
 db: USender<DbMsg>,
 broker: Sender<ImporterMsg>,
@@ -69,6 +334,27 @@ impl BlockProcessor {
 self.tracking.retain(|filter| !filters.contains(filter));
 }
 
+ /// Check if a transaction should trigger a notification based on tracking filters
+ ///
+ /// # Parameters
+ /// * `txid_bytes` - Transaction ID bytes to check against filters
+ ///
+ /// # Returns
+ /// * `bool` - Whether notification should be sent
+ fn should_notify_transaction(&self, txid_bytes: [u8; 32]) -> bool {
+ if self.tracking.is_empty() {
+ return false;
+ }
+
+ for filter in &self.tracking {
+ if filter.contains(txid_bytes) {
+ return true;
+ }
+ }
+
+ false
+ }
+
 // Helper function to calculate block height based on previous block hash
 fn calculate_block_height(
 &self,
@@ -223,10 +509,7 @@ impl BlockProcessor {
 // Get current transaction number or use starting value if not found
 match main.get(REC_TXNO).map_err(BlockProcError::TxNoAbsent)?
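// (Sketch note, not part of the patch: REC_TXNO is the persisted global
// transaction-number counter in TABLE_MAIN; on a fresh database the record
// is absent, which is why the `None` arm below now falls back to
// `TxNo::start()` without the extra debug log.)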
{ Some(rec) => TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)?, - None => { - log::debug!(target: NAME, "No transaction counter found, starting from zero"); - TxNo::start() - } + None => TxNo::start(), } }; @@ -238,8 +521,8 @@ impl BlockProcessor { let blockid = self.get_next_block_id(&db)?; - // Open tables needed in the loop to avoid repeated opening/closing which affects - // performance + // Initialize the transaction tables context + let mut tx_ctx = TxTablesContext::new(&db)?; let mut blocks_table = db .open_table(TABLE_BLKS) .map_err(BlockProcError::BlockTable)?; @@ -256,42 +539,6 @@ impl BlockProcessor { .open_table(TABLE_BLOCK_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Block heights table error: {}", e)))?; - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; - - let mut tx_blocks_table = db - .open_table(TABLE_TX_BLOCKS) - .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; - - let mut utxos_table = db - .open_table(TABLE_UTXOS) - .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; - - let mut inputs_table = db - .open_table(TABLE_INPUTS) - .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?; - - let mut outs_table = db - .open_table(TABLE_OUTS) - .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?; - - let mut spks_table = db - .open_table(TABLE_SPKS) - .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?; - - let mut txes_table = db - .open_table(TABLE_TXES) - .map_err(BlockProcError::TxesTable)?; - - let mut block_txs_table = db - .open_table(TABLE_BLOCK_TXS) - .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; - - let mut block_spends_table = db - .open_table(TABLE_BLOCK_SPENDS) - .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; - // Store block header blocks_table .insert(blockid, DbBlockHeader::from(block.header)) @@ -327,144 +574,32 @@ impl BlockProcessor { // Process transactions in the block for tx in block.transactions { - // Get txno from TABLE_TXIDS using txid. If it doesn't exist, use txno-counter, - // otherwise use the existing txno. This is mainly to avoid issues after block - // reorganization, where the same txid in different blocks could be - // assigned different txno values, leading to incorrect processing - let txid = tx.txid(); - let txno = txids_table - .get(txid.to_byte_array()) - .map_err(BlockProcError::TxidLookup)? - .map(|v| v.value()) - .unwrap_or_else(|| { - txno_counter.inc_assign(); - txno_counter - }); - - // Add transaction to the list for this block - block_txs.push(txno); - - txids_table - .insert(txid.to_byte_array(), txno) - .map_err(BlockProcError::TxidStorage)?; - - // Associate transaction with block ID - tx_blocks_table.insert(txno, blockid).map_err(|e| { - BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) - })?; - - // Process transaction inputs - for (vin_idx, input) in tx.inputs.iter().enumerate() { - if !input.prev_output.is_coinbase() { - let prev_txid = input.prev_output.txid; - let prev_vout = input.prev_output.vout; - - // Look up previous transaction number - if let Some(prev_txno) = txids_table - .get(prev_txid.to_byte_array()) - .map_err(BlockProcError::TxidLookup)? 
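// (The inline input/output handling removed below is the same logic that
// now lives in TxTablesContext::process_transaction above.)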
- .map(|v| v.value()) - { - // Mark UTXO as spent - utxos_table - .remove(&(prev_txno, prev_vout.into_u32())) - .map_err(|e| { - BlockProcError::Custom(format!("UTXOs removal error: {}", e)) - })?; - - // Record UTXO spent in this block - block_spends.push((prev_txno, prev_vout.into_u32())); - - // Record input-output mapping - inputs_table - .insert((txno, vin_idx as u32), (prev_txno, prev_vout.into_u32())) - .map_err(|e| { - BlockProcError::Custom(format!("Inputs storage error: {}", e)) - })?; - - // Update spending relationships - let mut spending_txs = outs_table - .get(prev_txno) - .map_err(|e| { - BlockProcError::Custom(format!("Outs lookup error: {}", e)) - })? - .map(|v| v.value().to_vec()) - .unwrap_or_default(); - spending_txs.push(txno); - outs_table.insert(prev_txno, spending_txs).map_err(|e| { - BlockProcError::Custom(format!("Outs update error: {}", e)) - })?; - } - } - } - - // Process transaction outputs - for (vout_idx, output) in tx.outputs.iter().enumerate() { - // Add new UTXO - utxos_table - .insert((txno, vout_idx as u32), ()) - .map_err(|e| { - BlockProcError::Custom(format!("UTXOs storage error: {}", e)) - })?; - - // Index script pubkey - let script = &output.script_pubkey; - if !script.is_empty() { - let mut txnos = spks_table - .get(script.as_slice()) - .map_err(|e| { - BlockProcError::Custom(format!("SPKs lookup error: {}", e)) - })? - .map(|v| v.value().to_vec()) - .unwrap_or_default(); - txnos.push(txno); - spks_table.insert(script.as_slice(), txnos).map_err(|e| { - BlockProcError::Custom(format!("SPKs update error: {}", e)) - })?; - } - } - - // Store complete transaction - txes_table - .insert(txno, DbTx::from(tx)) - .map_err(BlockProcError::TxesStorage)?; + let _ = tx_ctx.process_transaction( + &tx, + blockid, + &mut txno_counter, + &mut block_txs, + &mut block_spends, + )?; // Check if transaction ID is in tracking list and notify if needed + let txid = tx.txid(); let txid_bytes = txid.to_byte_array(); - let mut should_notify = false; - for filter in &self.tracking { - if filter.contains(txid_bytes) { - should_notify = true; - break; - } - } - if should_notify { + if self.should_notify_transaction(txid_bytes) { self.broker.send(ImporterMsg::Mined(txid))?; } count += 1; } - // Store all transaction numbers in this block - block_txs_table - .insert(blockid, block_txs) - .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; - - // Store UTXOs spent in this block - block_spends_table - .insert(blockid, block_spends) - .map_err(|e| { - BlockProcError::Custom(format!("Block spends storage error: {}", e)) - })?; - - // Update global counters - let mut main = db - .open_table(TABLE_MAIN) - .map_err(BlockProcError::MainTable)?; - - // Update transaction counter - main.insert(REC_TXNO, txno_counter.to_byte_array().as_slice()) - .map_err(BlockProcError::TxNoUpdate)?; + // Finalize the block processing + tx_ctx.finalize_block_processing( + &db, + blockid, + block_txs, + block_spends, + txno_counter, + )?; // Log successful block processing log::debug!( @@ -1955,10 +2090,7 @@ impl BlockProcessor { // Get current transaction number or use starting value if not found match main.get(REC_TXNO).map_err(BlockProcError::TxNoAbsent)? 
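// (Sketch note: this is the fork-application path; a txid seen on the old
// chain may already hold a TxNo, so process_transaction below reuses the
// existing number instead of allocating a fresh one.)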
{ Some(rec) => TxNo::from_slice(rec.value()).map_err(BlockProcError::TxNoInvalid)?, - None => { - log::debug!(target: NAME, "No transaction counter found, starting from zero"); - TxNo::start() - } + None => TxNo::start(), } }; @@ -1966,46 +2098,12 @@ impl BlockProcessor { let mut total_utxos_added = 0; let mut total_utxos_spent = 0; + let mut tx_ctx = TxTablesContext::new(db)?; + let fork_blocks_table = db .open_table(TABLE_FORK_BLOCKS) .map_err(|e| BlockProcError::Custom(format!("Fork blocks table error: {}", e)))?; - let mut txids_table = db - .open_table(TABLE_TXIDS) - .map_err(BlockProcError::TxidTable)?; - - let mut txes_table = db - .open_table(TABLE_TXES) - .map_err(BlockProcError::TxesTable)?; - - let mut tx_blocks_table = db - .open_table(TABLE_TX_BLOCKS) - .map_err(|e| BlockProcError::Custom(format!("Tx-blocks table error: {}", e)))?; - - let mut utxos_table = db - .open_table(TABLE_UTXOS) - .map_err(|e| BlockProcError::Custom(format!("UTXOs table error: {}", e)))?; - - let mut inputs_table = db - .open_table(TABLE_INPUTS) - .map_err(|e| BlockProcError::Custom(format!("Inputs table error: {}", e)))?; - - let mut outs_table = db - .open_table(TABLE_OUTS) - .map_err(|e| BlockProcError::Custom(format!("Outs table error: {}", e)))?; - - let mut spks_table = db - .open_table(TABLE_SPKS) - .map_err(|e| BlockProcError::Custom(format!("SPKs table error: {}", e)))?; - - let mut block_txs_table = db - .open_table(TABLE_BLOCK_TXS) - .map_err(|e| BlockProcError::Custom(format!("Block-txs table error: {}", e)))?; - - let mut block_spends_table = db - .open_table(TABLE_BLOCK_SPENDS) - .map_err(|e| BlockProcError::Custom(format!("Block spends table error: {}", e)))?; - let mut heights_table = db .open_table(TABLE_HEIGHTS) .map_err(|e| BlockProcError::Custom(format!("Heights table error: {}", e)))?; @@ -2052,144 +2150,38 @@ impl BlockProcessor { // Process all transactions in the block for tx in &block.transactions { - let txid = tx.txid(); - - // For fork blocks, txids may already be in the database with assigned txno - // Check if this txid already exists - let existing_txno = txids_table - .get(txid.to_byte_array()) - .map_err(BlockProcError::TxidLookup)? 
- .map(|v| v.value()); - - let tx_txno = if let Some(existing) = existing_txno { - // Use the existing transaction number - existing - } else { - // Assign a new transaction number - txno.inc_assign(); - txno - }; - - // Add transaction to the list for this block - block_txs.push(tx_txno); - - // If this is a new transaction, store its mapping and data - if existing_txno.is_none() { - txids_table - .insert(txid.to_byte_array(), tx_txno) - .map_err(BlockProcError::TxidStorage)?; + let (_, is_new) = tx_ctx.process_transaction( + tx, + block_id, + &mut txno, + &mut block_txs, + &mut block_spends, + )?; - // Store the transaction data - txes_table - .insert(tx_txno, DbTx::from(tx.clone())) - .map_err(BlockProcError::TxesStorage)?; + // Check if transaction ID is in tracking list and notify if needed + let txid = tx.txid(); + let txid_bytes = txid.to_byte_array(); + if self.should_notify_transaction(txid_bytes) { + self.broker.send(ImporterMsg::Mined(txid))?; + } + if is_new { block_txs_added += 1; } - // Associate transaction with block ID (update even if transaction existed) - tx_blocks_table.insert(tx_txno, block_id).map_err(|e| { - BlockProcError::Custom(format!("Tx-blocks storage error: {}", e)) - })?; + // Count UTXOs added (outputs) + block_utxos_added += tx.outputs.len(); - // Process transaction inputs - for (vin_idx, input) in tx.inputs.iter().enumerate() { + // Count UTXOs spent (inputs except coinbase) + for input in &tx.inputs { if !input.prev_output.is_coinbase() { - let prev_txid = input.prev_output.txid; - let prev_vout = input.prev_output.vout; - - // Look up previous transaction number - if let Some(prev_txno) = txids_table - .get(prev_txid.to_byte_array()) - .map_err(BlockProcError::TxidLookup)? - .map(|v| v.value()) - { - // Mark UTXO as spent - utxos_table - .remove(&(prev_txno, prev_vout.into_u32())) - .map_err(|e| { - BlockProcError::Custom(format!("UTXOs removal error: {}", e)) - })?; - - block_utxos_spent += 1; - - // Record UTXO spent in this block - block_spends.push((prev_txno, prev_vout.into_u32())); - - // Record input-output mapping - inputs_table - .insert( - (tx_txno, vin_idx as u32), - (prev_txno, prev_vout.into_u32()), - ) - .map_err(|e| { - BlockProcError::Custom(format!("Inputs storage error: {}", e)) - })?; - - // Update spending relationships - let mut spending_txs = outs_table - .get(prev_txno) - .map_err(|e| { - BlockProcError::Custom(format!("Outs lookup error: {}", e)) - })? - .map(|v| v.value().to_vec()) - .unwrap_or_default(); - - // Avoid duplicate entries - if !spending_txs.contains(&tx_txno) { - spending_txs.push(tx_txno); - outs_table.insert(prev_txno, spending_txs).map_err(|e| { - BlockProcError::Custom(format!("Outs update error: {}", e)) - })?; - } - } - } - } - - // Process transaction outputs - for (vout_idx, output) in tx.outputs.iter().enumerate() { - // Add new UTXO - utxos_table - .insert((tx_txno, vout_idx as u32), ()) - .map_err(|e| { - BlockProcError::Custom(format!("UTXOs storage error: {}", e)) - })?; - - block_utxos_added += 1; - - // Index script pubkey - let script = &output.script_pubkey; - if !script.is_empty() { - let mut txnos = spks_table - .get(script.as_slice()) - .map_err(|e| { - BlockProcError::Custom(format!("SPKs lookup error: {}", e)) - })? 
- .map(|v| v.value().to_vec()) - .unwrap_or_default(); - - // Avoid duplicate entries - if !txnos.contains(&tx_txno) { - txnos.push(tx_txno); - spks_table.insert(script.as_slice(), txnos).map_err(|e| { - BlockProcError::Custom(format!("SPKs update error: {}", e)) - })?; - } + block_utxos_spent += 1; } } } - // Store all transaction numbers in this block - block_txs_table - .insert(block_id, block_txs) - .map_err(|e| BlockProcError::Custom(format!("Block-txs storage error: {}", e)))?; - - // Store UTXOs spent in this block - block_spends_table - .insert(block_id, block_spends) - .map_err(|e| { - BlockProcError::Custom(format!("Block spends storage error: {}", e)) - })?; + // Finalize the block processing + tx_ctx.finalize_block_processing(db, block_id, block_txs, block_spends, txno)?; // Update the heights tables heights_table @@ -2221,13 +2213,6 @@ impl BlockProcessor { ); } - // Update the global transaction counter - let mut main = db - .open_table(TABLE_MAIN) - .map_err(BlockProcError::MainTable)?; - main.insert(REC_TXNO, txno.to_byte_array().as_slice()) - .map_err(BlockProcError::TxNoUpdate)?; - log::info!( target: NAME, "Successfully applied {} blocks: added {} transactions, added {} UTXOs, spent {} UTXOs",
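For reference, the duplicate guard that this patch applies to the TABLE_OUTS and TABLE_SPKS indexes can be modelled in isolation. The following is a minimal, self-contained sketch (illustration only: it substitutes a plain HashMap for the redb tables and u64 for TxNo) showing why re-applying the same block after a reorg leaves the index unchanged:

use std::collections::HashMap;

type TxNo = u64;

/// Insert `txno` into the multimap entry for `key`, skipping duplicates --
/// the same check guarding the TABLE_OUTS and TABLE_SPKS updates above.
fn index_insert(map: &mut HashMap<Vec<u8>, Vec<TxNo>>, key: &[u8], txno: TxNo) {
    let entry = map.entry(key.to_vec()).or_default();
    if !entry.contains(&txno) {
        entry.push(txno);
    }
}

fn main() {
    let mut spks: HashMap<Vec<u8>, Vec<TxNo>> = HashMap::new();
    let script = b"demo-script";

    // First application of a block indexes the transaction once...
    index_insert(&mut spks, script, 42);
    // ...and re-applying the same block after a fork changes nothing.
    index_insert(&mut spks, script, 42);

    assert_eq!(spks[script.as_slice()], vec![42]);
}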