From b0705d0221f727f00584629b5cd858d88dfbb802 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Sat, 7 Dec 2024 11:51:10 -0600 Subject: [PATCH 001/238] fix: new query --- stackslib/src/core/mempool.rs | 99 +++++++++++++++-------------------- 1 file changed, 43 insertions(+), 56 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 46ff54924b2..0ae18cd6266 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1645,8 +1645,6 @@ impl MemPoolDB { debug!("Mempool walk for {}ms", settings.max_walk_time_ms,); - let tx_consideration_sampler = Uniform::new(0, 100); - let mut rng = rand::thread_rng(); let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); @@ -1654,30 +1652,43 @@ impl MemPoolDB { // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries. let mut retry_store = HashMap::new(); + // Iterate pending mempool transactions using a heuristic that maximizes miner fee profitability and minimizes CPU time + // wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: + // + // 1. Tries to filter out transactions that have nonces smaller than the origin address' next expected nonce as stated in + // the `nonces` table, if available + // 2. Groups remaining transactions by origin address and ranks them prioritizing those with smaller nonces and higher + // fees + // 3. Sorts all ranked transactions by fee and returns them for evaluation + // + // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated + // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large + // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. + // + // This query also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked + // according to their nonce and then sub-sorted by their total `tx_fee` to determine which of them gets evaluated first. 
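As an illustration only (not part of the patch), the ranking described in the comment above can be exercised in isolation with rusqlite. The toy table shape and sample values below are invented and merely mirror the query's ROW_NUMBER() structure; a modern/bundled SQLite (3.25+) is assumed for window-function support. The printed ranks show a low-nonce, low-fee transaction beating a higher-fee transaction from the same address:

    use rusqlite::{Connection, Result};

    fn main() -> Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute_batch(
            "CREATE TABLE mempool(origin_address TEXT, origin_nonce INTEGER, fee_rate REAL);
             INSERT INTO mempool VALUES ('A', 1, 50.0), ('A', 0, 10.0), ('B', 0, 99.0);",
        )?;
        // Rank each address's transactions by nonce first, then fee rate; sort the
        // final result by rank so every address's next-nonce tx is visited early.
        let mut stmt = conn.prepare(
            "SELECT origin_address, origin_nonce,
                    ROW_NUMBER() OVER (
                        PARTITION BY origin_address
                        ORDER BY origin_nonce ASC, fee_rate DESC
                    ) AS rank
             FROM mempool
             ORDER BY rank ASC, fee_rate DESC",
        )?;
        let rows = stmt.query_map([], |r| {
            Ok((r.get::<_, String>(0)?, r.get::<_, i64>(1)?, r.get::<_, i64>(2)?))
        })?;
        for row in rows {
            println!("{:?}", row?); // ("B", 0, 1), ("A", 0, 1), ("A", 1, 2)
        }
        Ok(())
    }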
let sql = " - SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate - FROM mempool - WHERE fee_rate IS NULL - "; - let mut query_stmt_null = self - .db - .prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; - let mut null_iterator = query_stmt_null - .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; - - let sql = " + WITH nonce_filtered AS ( + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, tx_fee + FROM mempool + LEFT JOIN nonces ON nonces.address = mempool.origin_address AND origin_nonce >= nonces.nonce + ), + address_nonce_ranked AS ( + SELECT *, ROW_NUMBER() OVER ( + PARTITION BY origin_address + ORDER BY origin_nonce ASC, fee_rate DESC, tx_fee DESC + ) AS rank + FROM nonce_filtered + ) SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate - FROM mempool - WHERE fee_rate IS NOT NULL - ORDER BY fee_rate DESC + FROM address_nonce_ranked + ORDER BY rank ASC, fee_rate DESC, tx_fee DESC "; - let mut query_stmt_fee = self + let mut query_stmt = self .db .prepare(&sql) .map_err(|err| Error::SqliteError(err))?; - let mut fee_iterator = query_stmt_fee + let mut tx_iterator = query_stmt .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))?; @@ -1688,9 +1699,6 @@ impl MemPoolDB { break MempoolIterationStopReason::DeadlineReached; } - let start_with_no_estimate = - tx_consideration_sampler.sample(&mut rng) < settings.consider_no_estimate_tx_prob; - // First, try to read from the retry list let (candidate, update_estimate) = match candidate_cache.next() { Some(tx) => { @@ -1698,36 +1706,16 @@ impl MemPoolDB { (tx, update_estimate) } None => { - // When the retry list is empty, read from the mempool db, - // randomly selecting from either the null fee-rate transactions - // or those with fee-rate estimates. - let opt_tx = if start_with_no_estimate { - null_iterator - .next() - .map_err(|err| Error::SqliteError(err))? - } else { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? - }; - match opt_tx { - Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), + // When the retry list is empty, read from the mempool db + match tx_iterator.next().map_err(|err| Error::SqliteError(err))? { + Some(row) => { + let tx = MemPoolTxInfoPartial::from_row(row)?; + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + }, None => { - // If the selected iterator is empty, check the other - match if start_with_no_estimate { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? - } else { - null_iterator - .next() - .map_err(|err| Error::SqliteError(err))? - } { - Some(row) => ( - MemPoolTxInfoPartial::from_row(row)?, - !start_with_no_estimate, - ), - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; - } - } + debug!("No more transactions to consider in mempool"); + break MempoolIterationStopReason::NoMoreCandidates; } } } @@ -1774,6 +1762,7 @@ impl MemPoolDB { "expected_origin_nonce" => expected_origin_nonce, "expected_sponsor_nonce" => expected_sponsor_nonce, ); + // FIXME: record this fact so we can take it into acct in the next pass // This transaction cannot execute in this pass, just drop it continue; } @@ -1928,10 +1917,8 @@ impl MemPoolDB { // drop these rusqlite statements and queries, since their existence as immutable borrows on the // connection prevents us from beginning a transaction below (which requires a mutable // borrow). 
- drop(null_iterator); - drop(fee_iterator); - drop(query_stmt_null); - drop(query_stmt_fee); + drop(tx_iterator); + drop(query_stmt); if retry_store.len() > 0 { let tx = self.tx_begin()?; From a0600b63a4bc4b91d74bc73a27c130c95b0b0859 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Sat, 7 Dec 2024 11:52:20 -0600 Subject: [PATCH 002/238] chore: remove dev comment --- stackslib/src/core/mempool.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 0ae18cd6266..5b42ccacc06 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1762,7 +1762,6 @@ impl MemPoolDB { "expected_origin_nonce" => expected_origin_nonce, "expected_sponsor_nonce" => expected_sponsor_nonce, ); - // FIXME: record this fact so we can take it into acct in the next pass // This transaction cannot execute in this pass, just drop it continue; } From 3719188ff2de9f737a6d50e7b809a4a3b6b4b5c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20C=C3=A1rdenas?= Date: Wed, 11 Dec 2024 10:10:48 -0500 Subject: [PATCH 003/238] style: lint fixes --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 5b42ccacc06..958e0509786 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1712,7 +1712,7 @@ impl MemPoolDB { let tx = MemPoolTxInfoPartial::from_row(row)?; let update_estimate = tx.fee_rate.is_none(); (tx, update_estimate) - }, + } None => { debug!("No more transactions to consider in mempool"); break MempoolIterationStopReason::NoMoreCandidates; From 2cd116f2183e24e28e9928141a3683dd238edcff Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 13 Dec 2024 15:39:13 -0600 Subject: [PATCH 004/238] fix: add simulated fee rates for null --- stackslib/src/core/mempool.rs | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 5b42ccacc06..337b1122080 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1655,34 +1655,44 @@ impl MemPoolDB { // Iterate pending mempool transactions using a heuristic that maximizes miner fee profitability and minimizes CPU time // wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: // - // 1. Tries to filter out transactions that have nonces smaller than the origin address' next expected nonce as stated in - // the `nonces` table, if available - // 2. Groups remaining transactions by origin address and ranks them prioritizing those with smaller nonces and higher - // fees - // 3. Sorts all ranked transactions by fee and returns them for evaluation + // 1. Filters out transactions that have nonces smaller than the origin address' next expected nonce as stated in the + // `nonces` table, when possible + // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate + // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable + // order + // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin address) + // 4. 
Sorts all ranked transactions by fee and returns them for evaluation // // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. // // This query also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked - // according to their nonce and then sub-sorted by their total `tx_fee` to determine which of them gets evaluated first. + // according to their origin address nonce. let sql = " WITH nonce_filtered AS ( SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, tx_fee FROM mempool LEFT JOIN nonces ON nonces.address = mempool.origin_address AND origin_nonce >= nonces.nonce ), + null_compensated AS ( + SELECT *, + CASE + WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) AS max FROM nonce_filtered) + ELSE fee_rate + END AS sort_fee_rate + FROM nonce_filtered + ), address_nonce_ranked AS ( SELECT *, ROW_NUMBER() OVER ( PARTITION BY origin_address - ORDER BY origin_nonce ASC, fee_rate DESC, tx_fee DESC + ORDER BY origin_nonce ASC, sort_fee_rate DESC ) AS rank - FROM nonce_filtered + FROM null_compensated ) SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM address_nonce_ranked - ORDER BY rank ASC, fee_rate DESC, tx_fee DESC + ORDER BY rank ASC, sort_fee_rate DESC "; let mut query_stmt = self .db @@ -1712,7 +1722,7 @@ impl MemPoolDB { let tx = MemPoolTxInfoPartial::from_row(row)?; let update_estimate = tx.fee_rate.is_none(); (tx, update_estimate) - }, + } None => { debug!("No more transactions to consider in mempool"); break MempoolIterationStopReason::NoMoreCandidates; From 685924ce42b67bff579931102c1063ba5bd758a3 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 13 Dec 2024 16:03:05 -0600 Subject: [PATCH 005/238] fix: indexes --- stackslib/src/core/mempool.rs | 42 ++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 337b1122080..69be7335dc2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -820,6 +820,20 @@ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &'static [&'static str] = &[ "#, ]; +const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[ + r#" + -- Drop redundant mempool indexes, covered by unique constraints + DROP INDEX IF EXISTS "by_txid"; + DROP INDEX IF EXISTS "by_sponsor"; + DROP INDEX IF EXISTS "by_origin"; + -- Add index to help comparing address nonces against mempool content + CREATE INDEX IF NOT EXISTS by_address_nonce ON nonces(address, nonce); + "#, + r#" + INSERT INTO schema_version (version) VALUES (8) + "#, +]; + const MEMPOOL_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS by_txid ON mempool(txid);", "CREATE INDEX IF NOT EXISTS by_height ON mempool(height);", @@ -1393,6 +1407,16 @@ impl MemPoolDB { Ok(()) } + /// Optimize indexes for mempool visits + #[cfg_attr(test, mutants::skip)] + fn instantiate_schema_8(tx: &DBTx) -> Result<(), db_error> { + for sql_exec in MEMPOOL_SCHEMA_8_NONCE_SORTING { + tx.execute_batch(sql_exec)?; + } + + Ok(()) + } + #[cfg_attr(test, mutants::skip)] pub fn db_path(chainstate_root_path: &str) -> Result { let mut path = 
PathBuf::from(chainstate_root_path); @@ -1671,24 +1695,20 @@ impl MemPoolDB { // according to their origin address nonce. let sql = " WITH nonce_filtered AS ( - SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, tx_fee + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, tx_fee, + CASE + WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) FROM mempool) + ELSE fee_rate + END AS sort_fee_rate FROM mempool - LEFT JOIN nonces ON nonces.address = mempool.origin_address AND origin_nonce >= nonces.nonce - ), - null_compensated AS ( - SELECT *, - CASE - WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) AS max FROM nonce_filtered) - ELSE fee_rate - END AS sort_fee_rate - FROM nonce_filtered + LEFT JOIN nonces ON mempool.origin_address = nonces.address AND mempool.origin_nonce >= nonces.nonce ), address_nonce_ranked AS ( SELECT *, ROW_NUMBER() OVER ( PARTITION BY origin_address ORDER BY origin_nonce ASC, sort_fee_rate DESC ) AS rank - FROM null_compensated + FROM nonce_filtered ) SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM address_nonce_ranked From e1dac9d7f5f93ba9f332d098bdf36dbe77802770 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Fri, 13 Dec 2024 16:04:25 -0600 Subject: [PATCH 006/238] fix: remove tx_fee column --- stackslib/src/core/mempool.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 69be7335dc2..cdb24f9c778 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1695,7 +1695,7 @@ impl MemPoolDB { // according to their origin address nonce. let sql = " WITH nonce_filtered AS ( - SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, tx_fee, + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, CASE WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) FROM mempool) ELSE fee_rate From dd9729c9a98ac598d2477c68953fb7fa269908cf Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Sat, 14 Dec 2024 18:04:47 -0600 Subject: [PATCH 007/238] test: correct tx order --- .../stacks/tests/block_construction.rs | 185 ++++++++++++++++++ stackslib/src/core/mempool.rs | 19 +- 2 files changed, 194 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 7b7720b996a..ebc9c25212f 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5087,3 +5087,188 @@ fn paramaterized_mempool_walk_test( }, ); } + +#[test] +/// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees. 
+fn mempool_walk_test_nonce_filtered_and_ranked() { + let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..3) + .map(|_user_index| { + let privk = StacksPrivateKey::new(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&privk)], + ) + .unwrap(); + (privk, addr) + }) + .collect(); + let origin_addresses: Vec = key_address_pairs + .iter() + .map(|(_, b)| b.to_string()) + .collect(); + let address_0 = origin_addresses[0].to_string(); + let address_1 = origin_addresses[1].to_string(); + let address_2 = origin_addresses[2].to_string(); + + let test_name = "mempool_walk_test_nonce_filtered_and_ranked"; + let mut peer_config = TestPeerConfig::new(test_name, 2002, 2003); + + peer_config.initial_balances = vec![]; + for (privk, addr) in &key_address_pairs { + peer_config + .initial_balances + .push((addr.to_account_principal(), 1000000000)); + } + + let recipient = + StacksAddress::from_string("ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV").unwrap(); + + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, &test_name, vec![]); + let chainstate_path = chainstate_path(&test_name); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + + let mut tx_events = Vec::new(); + + // Submit nonces 0 through 9 for each of the 3 senders. + for nonce in 0..10 { + for user_index in 0..3 { + let mut tx = make_user_stacks_transfer( + &key_address_pairs[user_index].0, + nonce as u64, + 200, + &recipient.to_account_principal(), + 1, + ); + + let mut mempool_tx = mempool.tx_begin().unwrap(); + + let origin_address = tx.origin_address(); + let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); + + tx.set_tx_fee(100); + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_1.0, + &b_1.1, + true, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + nonce.try_into().unwrap(), + &sponsor_address, + nonce.try_into().unwrap(), + None, + ) + .unwrap(); + + // Increase the `fee_rate` as nonce goes up, so we can test that lower nonces get confirmed before higher fee txs. + // Also slightly increase the fee for some addresses so we can check those txs get selected first. + mempool_tx + .execute( + "UPDATE mempool SET fee_rate = ? WHERE txid = ?", + params![Some(123.0 * (nonce + 1 + user_index) as f64), &txid], + ) + .unwrap(); + mempool_tx.commit().unwrap(); + } + } + + // Simulate next possible nonces for the 3 addresses: + // Address 0 => 2 + // Address 1 => 7 + // Address 2 => 9 + let mempool_tx = mempool.tx_begin().unwrap(); + mempool_tx + .execute( + "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?)", + params![address_0, 2, address_1, 7, address_2, 9], + ) + .unwrap(); + mempool_tx.commit().unwrap(); + + // Visit transactions. Keep a record of the order of visited txs so we can compare at the end. 
+ let mut considered_txs = vec![]; + let deadline = get_epoch_time_ms() + 30000; + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + // When the candidate cache fills, one pass cannot process all transactions + loop { + if mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + MemPoolWalkSettings::default(), + |_, available_tx, _| { + considered_txs.push(( + available_tx.tx.metadata.origin_address.to_string(), + available_tx.tx.metadata.origin_nonce, + )); + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + available_tx.tx.metadata.tx_fee, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::zero(), + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap() + .0 + == 0 + { + break; + } + assert!(get_epoch_time_ms() < deadline, "test timed out"); + } + assert_eq!( + considered_txs, + vec![ + (address_0.clone(), 2), + (address_0.clone(), 3), + (address_0.clone(), 4), + (address_0.clone(), 5), + (address_0.clone(), 6), + (address_1.clone(), 7), // Higher fee for address 1 + (address_0.clone(), 7), + (address_1.clone(), 8), + (address_0.clone(), 8), + (address_2.clone(), 9), // Higher fee for address 2 + (address_1.clone(), 9), + (address_0.clone(), 9), + ], + "Mempool should visit transactions in the correct order while ignoring past nonces", + ); + }, + ); +} diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index cdb24f9c778..38322e22454 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -525,8 +525,7 @@ pub struct MemPoolWalkSettings { /// milliseconds. This is a soft deadline. pub max_walk_time_ms: u64, /// Probability percentage to consider a transaction which has not received a cost estimate. - /// That is, with x%, when picking the next transaction to include a block, select one that - /// either failed to get a cost estimate or has not been estimated yet. + /// This property is no longer used and will be ignored. pub consider_no_estimate_tx_prob: u8, /// Size of the nonce cache. This avoids MARF look-ups. pub nonce_cache_size: u64, @@ -1689,10 +1688,9 @@ impl MemPoolDB { // // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large - // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. - // - // This query also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked - // according to their origin address nonce. + // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. This query + // also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked according to + // their origin address nonce. 
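The `CASE WHEN fee_rate IS NULL` expression in the query that follows deserves unpacking. SQLite's RANDOM() returns a signed 64-bit integer, so `ABS(RANDOM()) % 10000 / 10000.0` is a pseudo-uniform value in [0, 1), which the query then scales by the current maximum fee rate. Restated in Rust (a sketch only, using the rand crate in place of RANDOM()):

    use rand::Rng;

    fn simulated_fee_rate(max_fee_rate: f64) -> f64 {
        let r: i64 = rand::thread_rng().gen(); // SQLite's RANDOM(): a signed 64-bit int
        let unit = (r.unsigned_abs() % 10_000) as f64 / 10_000.0; // pseudo-uniform in [0, 1)
        unit * max_fee_rate // NULL-fee transactions land somewhere in [0, max_fee_rate)
    }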
let sql = " WITH nonce_filtered AS ( SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, @@ -1704,10 +1702,11 @@ impl MemPoolDB { LEFT JOIN nonces ON mempool.origin_address = nonces.address AND mempool.origin_nonce >= nonces.nonce ), address_nonce_ranked AS ( - SELECT *, ROW_NUMBER() OVER ( - PARTITION BY origin_address - ORDER BY origin_nonce ASC, sort_fee_rate DESC - ) AS rank + SELECT *, + ROW_NUMBER() OVER ( + PARTITION BY origin_address + ORDER BY origin_nonce ASC, sort_fee_rate DESC + ) AS rank FROM nonce_filtered ) SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate From 7702f67f9cc6097f680bf5c4542a97f5dddfedb6 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Mon, 16 Dec 2024 12:39:08 -0600 Subject: [PATCH 008/238] fix: nonce ordering --- .../chainstate/stacks/tests/block_construction.rs | 14 +++++++------- stackslib/src/core/mempool.rs | 3 ++- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ebc9c25212f..39000825e4b 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5183,12 +5183,12 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { ) .unwrap(); - // Increase the `fee_rate` as nonce goes up, so we can test that lower nonces get confirmed before higher fee txs. + // Increase the `fee_rate` as nonce goes up, so we can test that next nonces get confirmed before higher fee txs. // Also slightly increase the fee for some addresses so we can check those txs get selected first. mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", - params![Some(123.0 * (nonce + 1 + user_index) as f64), &txid], + params![Some(100.0 * (nonce + 1 + user_index) as f64), &txid], ) .unwrap(); mempool_tx.commit().unwrap(); @@ -5254,18 +5254,18 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { assert_eq!( considered_txs, vec![ + (address_2.clone(), 9), // Highest fee for address 2, and 9 is the next nonce + (address_1.clone(), 7), (address_0.clone(), 2), + (address_1.clone(), 8), (address_0.clone(), 3), + (address_1.clone(), 9), // Highest fee for address 1, but have to confirm nonces 7 and 8 first (address_0.clone(), 4), (address_0.clone(), 5), (address_0.clone(), 6), - (address_1.clone(), 7), // Higher fee for address 1 (address_0.clone(), 7), - (address_1.clone(), 8), (address_0.clone(), 8), - (address_2.clone(), 9), // Higher fee for address 2 - (address_1.clone(), 9), - (address_0.clone(), 9), + (address_0.clone(), 9), // Highest fee for address 0, but have to confirm all other nonces first ], "Mempool should visit transactions in the correct order while ignoring past nonces", ); diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 38322e22454..0ff1d8c28f8 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1699,7 +1699,8 @@ impl MemPoolDB { ELSE fee_rate END AS sort_fee_rate FROM mempool - LEFT JOIN nonces ON mempool.origin_address = nonces.address AND mempool.origin_nonce >= nonces.nonce + LEFT JOIN nonces ON mempool.origin_address = nonces.address + WHERE nonces.address IS NULL OR mempool.origin_nonce >= nonces.nonce ), address_nonce_ranked AS ( SELECT *, From d0d1f8d183ed8d38269c2a41c1a1d7db2a79d092 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Mon, 16 Dec 2024 12:43:02 -0600 Subject: [PATCH 009/238] fix: remove now 
unnecessary index --- stackslib/src/core/mempool.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 0ff1d8c28f8..8871e3b9d3b 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -825,8 +825,6 @@ const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[ DROP INDEX IF EXISTS "by_txid"; DROP INDEX IF EXISTS "by_sponsor"; DROP INDEX IF EXISTS "by_origin"; - -- Add index to help comparing address nonces against mempool content - CREATE INDEX IF NOT EXISTS by_address_nonce ON nonces(address, nonce); "#, r#" INSERT INTO schema_version (version) VALUES (8) From cf6c37b2b0b6bdb6c1f33855612a7fbd3be0c7f3 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Mon, 16 Dec 2024 23:23:15 -0600 Subject: [PATCH 010/238] chore: config strategy --- .../stacks/tests/block_construction.rs | 5 ++++- testnet/stacks-node/src/config.rs | 20 ++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 2c160941358..4402dd13e67 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -30,6 +30,7 @@ use clarity::vm::costs::LimitedCostTracker; use clarity::vm::database::ClarityDatabase; use clarity::vm::test_util::TEST_BURN_STATE_DB; use clarity::vm::types::*; +use mempool::MemPoolWalkStrategy; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; use rusqlite::params; @@ -5209,6 +5210,8 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { mempool_tx.commit().unwrap(); // Visit transactions. Keep a record of the order of visited txs so we can compare at the end. 
+ let mut settings = MemPoolWalkSettings::default(); + settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; let mut considered_txs = vec![]; let deadline = get_epoch_time_ms() + 30000; chainstate.with_read_only_clarity_tx( @@ -5221,7 +5224,7 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - MemPoolWalkSettings::default(), + settings, |_, available_tx, _| { considered_txs.push(( available_tx.tx.metadata.origin_address.to_string(), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 4ad793a4c30..5d6bffdeb94 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -36,7 +36,7 @@ use stacks::chainstate::stacks::index::marf::MARFOpenOpts; use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; use stacks::chainstate::stacks::miner::{BlockBuilderSettings, MinerStatus}; use stacks::chainstate::stacks::MAX_BLOCK_LEN; -use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkTxTypes}; +use stacks::core::mempool::{MemPoolWalkSettings, MemPoolWalkStrategy, MemPoolWalkTxTypes}; use stacks::core::{ MemPoolDB, StacksEpoch, StacksEpochExtension, StacksEpochId, BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, @@ -1060,6 +1060,7 @@ impl Config { BlockBuilderSettings { max_miner_time_ms: miner_config.nakamoto_attempt_time_ms, mempool_settings: MemPoolWalkSettings { + strategy: miner_config.mempool_walk_strategy, max_walk_time_ms: miner_config.nakamoto_attempt_time_ms, consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, nonce_cache_size: miner_config.nonce_cache_size, @@ -1103,6 +1104,7 @@ impl Config { // second or later attempt to mine a block -- give it some time miner_config.subsequent_attempt_time_ms }, + strategy: miner_config.mempool_walk_strategy, consider_no_estimate_tx_prob: miner_config.probability_pick_no_estimate_tx, nonce_cache_size: miner_config.nonce_cache_size, candidate_retry_cache_size: miner_config.candidate_retry_cache_size, @@ -2092,6 +2094,8 @@ pub struct MinerConfig { pub microblock_attempt_time_ms: u64, /// Max time to assemble Nakamoto block pub nakamoto_attempt_time_ms: u64, + /// Strategy to follow when picking next mempool transactions to consider. 
+ pub mempool_walk_strategy: MemPoolWalkStrategy, pub probability_pick_no_estimate_tx: u8, pub block_reward_recipient: Option, /// If possible, mine with a p2wpkh address @@ -2170,6 +2174,7 @@ impl Default for MinerConfig { activated_vrf_key_path: None, fast_rampup: false, underperform_stop_threshold: None, + mempool_walk_strategy: MemPoolWalkStrategy::GlobalFeeRate, txs_to_consider: MemPoolWalkTxTypes::all(), filter_origins: HashSet::new(), max_reorg_depth: 3, @@ -2542,6 +2547,7 @@ pub struct MinerConfigFile { pub subsequent_attempt_time_ms: Option, pub microblock_attempt_time_ms: Option, pub nakamoto_attempt_time_ms: Option, + pub mempool_walk_strategy: Option, pub probability_pick_no_estimate_tx: Option, pub block_reward_recipient: Option, pub segwit: Option, @@ -2658,6 +2664,18 @@ impl MinerConfigFile { activated_vrf_key_path: self.activated_vrf_key_path.clone(), fast_rampup: self.fast_rampup.unwrap_or(miner_default_config.fast_rampup), underperform_stop_threshold: self.underperform_stop_threshold, + mempool_walk_strategy: { + if let Some(mempool_walk_strategy) = &self.mempool_walk_strategy { + match str::parse(&mempool_walk_strategy) { + Ok(strategy) => strategy, + Err(e) => { + panic!("could not parse '{mempool_walk_strategy}': {e}"); + }, + } + } else { + MemPoolWalkStrategy::GlobalFeeRate + } + }, txs_to_consider: { if let Some(txs_to_consider) = &self.txs_to_consider { txs_to_consider From b8b9b89d0b2a694aa7b175ce2ec8a26b4b590fed Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Mon, 16 Dec 2024 23:45:16 -0600 Subject: [PATCH 011/238] chore: strategy selection draft --- .../stacks/tests/block_construction.rs | 8 +- stackslib/src/core/mempool.rs | 177 +++++++++++++++--- 2 files changed, 152 insertions(+), 33 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 4402dd13e67..f9f46002301 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5210,8 +5210,8 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { mempool_tx.commit().unwrap(); // Visit transactions. Keep a record of the order of visited txs so we can compare at the end. 
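Stepping back to the config plumbing added above (an aside, not part of the patch): the new miner knob is read from the node's TOML config and falls back to `GlobalFeeRate` when absent. Condensed to its essentials, with the TOML shape shown in the comment as an assumption about how stacks-node miner options are usually set:

    use std::str::FromStr;
    use stacks::core::mempool::MemPoolWalkStrategy;

    // Assumed stacks-node config snippet:
    //   [miner]
    //   mempool_walk_strategy = "NextNonceWithHighestFeeRate"
    fn strategy_from_config(raw: Option<&str>) -> MemPoolWalkStrategy {
        match raw {
            Some(s) => MemPoolWalkStrategy::from_str(s)
                .unwrap_or_else(|e| panic!("could not parse '{s}': {e}")),
            None => MemPoolWalkStrategy::GlobalFeeRate,
        }
    }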
- let mut settings = MemPoolWalkSettings::default(); - settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; let mut considered_txs = vec![]; let deadline = get_epoch_time_ms() + 30000; chainstate.with_read_only_clarity_tx( @@ -5224,7 +5224,7 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { .iterate_candidates::<_, ChainstateError, _>( clarity_conn, &mut tx_events, - settings, + mempool_settings.clone(), |_, available_tx, _| { considered_txs.push(( available_tx.tx.metadata.origin_address.to_string(), @@ -5239,7 +5239,7 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 8871e3b9d3b..9d94a7d10bc 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -29,7 +29,8 @@ use rand::distributions::Uniform; use rand::prelude::Distribution; use rusqlite::types::ToSql; use rusqlite::{ - params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Transaction, + params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Statement, + Transaction, }; use siphasher::sip::SipHasher; // this is SipHash-2-4 use stacks_common::codec::{ @@ -519,13 +520,40 @@ impl MemPoolWalkTxTypes { } } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum MemPoolWalkStrategy { + /// Select transactions with the highest global fee rate. + GlobalFeeRate, + /// Select transactions with the next expected nonce for origin and sponsor addresses, + NextNonceWithHighestFeeRate, +} + +impl FromStr for MemPoolWalkStrategy { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + "GlobalFeeRate" => { + return Ok(Self::GlobalFeeRate); + } + "NextNonceWithHighestFeeRate" => { + return Ok(Self::NextNonceWithHighestFeeRate); + } + _ => { + return Err("Unknown mempool walk strategy"); + } + } + } +} + #[derive(Debug, Clone)] pub struct MemPoolWalkSettings { + /// Strategy to use when selecting the next transactions to consider in the `mempool` table. + pub strategy: MemPoolWalkStrategy, /// Maximum amount of time a miner will spend walking through mempool transactions, in /// milliseconds. This is a soft deadline. pub max_walk_time_ms: u64, /// Probability percentage to consider a transaction which has not received a cost estimate. - /// This property is no longer used and will be ignored. + /// Only used when walk strategy is `GlobalFeeRate`. pub consider_no_estimate_tx_prob: u8, /// Size of the nonce cache. This avoids MARF look-ups. 
pub nonce_cache_size: u64, @@ -544,6 +572,7 @@ pub struct MemPoolWalkSettings { impl Default for MemPoolWalkSettings { fn default() -> Self { MemPoolWalkSettings { + strategy: MemPoolWalkStrategy::GlobalFeeRate, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, @@ -563,6 +592,7 @@ impl Default for MemPoolWalkSettings { impl MemPoolWalkSettings { pub fn zero() -> MemPoolWalkSettings { MemPoolWalkSettings { + strategy: MemPoolWalkStrategy::GlobalFeeRate, max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, nonce_cache_size: 1024 * 1024, @@ -1318,6 +1348,9 @@ impl MemPoolDB { MemPoolDB::instantiate_schema_7(tx)?; } 7 => { + MemPoolDB::instantiate_schema_8(tx)?; + } + 8 => { break; } _ => { @@ -1673,16 +1706,50 @@ impl MemPoolDB { // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries. let mut retry_store = HashMap::new(); - // Iterate pending mempool transactions using a heuristic that maximizes miner fee profitability and minimizes CPU time - // wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: + // == Queries for `GlobalFeeRate` mempool walk strategy + // + // Selects mempool transactions only based on their fee rate. Transactions with NULL fee rates get randomly selected for + // consideration. + let tx_consideration_sampler = Uniform::new(0, 100); + let mut rng = rand::thread_rng(); + let sql = " + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate + FROM mempool + WHERE fee_rate IS NULL + "; + let mut query_stmt_null = self + .db + .prepare(&sql) + .map_err(|err| Error::SqliteError(err))?; + let mut null_iterator = query_stmt_null + .query(NO_PARAMS) + .map_err(|err| Error::SqliteError(err))?; + let sql = " + SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate + FROM mempool + WHERE fee_rate IS NOT NULL + ORDER BY fee_rate DESC + "; + let mut query_stmt_fee = self + .db + .prepare(&sql) + .map_err(|err| Error::SqliteError(err))?; + let mut fee_iterator = query_stmt_fee + .query(NO_PARAMS) + .map_err(|err| Error::SqliteError(err))?; + + // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy + // + // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes + // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: // - // 1. Filters out transactions that have nonces smaller than the origin address' next expected nonce as stated in the - // `nonces` table, when possible + // 1. Filters out transactions that have nonces smaller than the origin and sponsor address' next expected nonce as stated + // in the `nonces` table, when possible // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable // order - // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin address) - // 4. Sorts all ranked transactions by fee and returns them for evaluation + // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin and sponsor address) + // 4. 
Takes the top ranked transaction and returns it for evaluation // // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large @@ -1696,29 +1763,33 @@ impl MemPoolDB { WHEN fee_rate IS NULL THEN (ABS(RANDOM()) % 10000 / 10000.0) * (SELECT MAX(fee_rate) FROM mempool) ELSE fee_rate END AS sort_fee_rate - FROM mempool - LEFT JOIN nonces ON mempool.origin_address = nonces.address - WHERE nonces.address IS NULL OR mempool.origin_nonce >= nonces.nonce + FROM mempool AS m + LEFT JOIN nonces AS no ON m.origin_address = no.address + LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address + WHERE (no.address IS NULL OR m.origin_nonce >= no.nonce) + AND (ns.address IS NULL OR m.sponsor_nonce >= ns.nonce) ), address_nonce_ranked AS ( SELECT *, ROW_NUMBER() OVER ( PARTITION BY origin_address ORDER BY origin_nonce ASC, sort_fee_rate DESC - ) AS rank + ) AS origin_rank, + ROW_NUMBER() OVER ( + PARTITION BY sponsor_address + ORDER BY sponsor_nonce ASC, sort_fee_rate DESC + ) AS sponsor_rank FROM nonce_filtered ) SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM address_nonce_ranked - ORDER BY rank ASC, sort_fee_rate DESC + ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC + LIMIT 1 "; - let mut query_stmt = self + let mut query_stmt_nonce_rank = self .db .prepare(&sql) .map_err(|err| Error::SqliteError(err))?; - let mut tx_iterator = query_stmt - .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { @@ -1734,16 +1805,61 @@ impl MemPoolDB { (tx, update_estimate) } None => { - // When the retry list is empty, read from the mempool db - match tx_iterator.next().map_err(|err| Error::SqliteError(err))? { - Some(row) => { - let tx = MemPoolTxInfoPartial::from_row(row)?; - let update_estimate = tx.fee_rate.is_none(); - (tx, update_estimate) - } - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; + // When the retry list is empty, read from the mempool db depending on the configured miner strategy + match settings.strategy { + MemPoolWalkStrategy::GlobalFeeRate => { + let start_with_no_estimate = + tx_consideration_sampler.sample(&mut rng) < settings.consider_no_estimate_tx_prob; + // randomly select from either the null fee-rate transactions or those with fee-rate estimates. + let opt_tx = if start_with_no_estimate { + null_iterator + .next() + .map_err(|err| Error::SqliteError(err))? + } else { + fee_iterator.next().map_err(|err| Error::SqliteError(err))? + }; + match opt_tx { + Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), + None => { + // If the selected iterator is empty, check the other + match if start_with_no_estimate { + fee_iterator.next().map_err(|err| Error::SqliteError(err))? + } else { + null_iterator + .next() + .map_err(|err| Error::SqliteError(err))? + } { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + !start_with_no_estimate, + ), + None => { + debug!("No more transactions to consider in mempool"); + break MempoolIterationStopReason::NoMoreCandidates; + } + } + } + } + }, + MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { + // Execute the query to get a single row. 
We do not use an iterator because we want the top rank to be + // recalculated every time we visit a transaction. + match query_stmt_nonce_rank + .query(NO_PARAMS) + .map_err(|err| Error::SqliteError(err))? + .next() + .map_err(|err| Error::SqliteError(err))? + { + Some(row) => { + let tx = MemPoolTxInfoPartial::from_row(row)?; + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + }, + None => { + debug!("No more transactions to consider in mempool"); + break MempoolIterationStopReason::NoMoreCandidates; + } + } } } } @@ -1944,8 +2060,11 @@ impl MemPoolDB { // drop these rusqlite statements and queries, since their existence as immutable borrows on the // connection prevents us from beginning a transaction below (which requires a mutable // borrow). - drop(tx_iterator); - drop(query_stmt); + drop(null_iterator); + drop(query_stmt_null); + drop(fee_iterator); + drop(query_stmt_fee); + drop(query_stmt_nonce_rank); if retry_store.len() > 0 { let tx = self.tx_begin()?; From ea49875acb4d20b0775ba291a29470455f77b591 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Tue, 17 Dec 2024 14:35:08 -0600 Subject: [PATCH 012/238] fix: correct tx confirmation order --- .../src/chainstate/stacks/tests/block_construction.rs | 8 ++++---- stackslib/src/core/mempool.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index f9f46002301..c29276613ec 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5113,8 +5113,8 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { let address_1 = origin_addresses[1].to_string(); let address_2 = origin_addresses[2].to_string(); - let test_name = "mempool_walk_test_nonce_filtered_and_ranked"; - let mut peer_config = TestPeerConfig::new(test_name, 2002, 2003); + let test_name = function_name!(); + let mut peer_config = TestPeerConfig::new(&test_name, 0, 0); peer_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { @@ -5259,10 +5259,10 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { vec![ (address_2.clone(), 9), // Highest fee for address 2, and 9 is the next nonce (address_1.clone(), 7), - (address_0.clone(), 2), (address_1.clone(), 8), - (address_0.clone(), 3), (address_1.clone(), 9), // Highest fee for address 1, but have to confirm nonces 7 and 8 first + (address_0.clone(), 2), + (address_0.clone(), 3), (address_0.clone(), 4), (address_0.clone(), 5), (address_0.clone(), 6), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 9d94a7d10bc..d9a462099ea 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1840,7 +1840,7 @@ impl MemPoolDB { } } } - }, + } MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { // Execute the query to get a single row. We do not use an iterator because we want the top rank to be // recalculated every time we visit a transaction. 
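The single-row re-query pattern described in the comment above is worth isolating: instead of holding one long-lived iterator over a ranked result set, the statement is re-executed on every loop pass so the ranking reflects rows consumed since the last visit. A reduced sketch (assumes a `mempool` table with `txid` and `fee_rate`; the real walk also folds in the retry cache and the `nonces` table):

    use rusqlite::{Connection, OptionalExtension, Result};

    fn next_candidate(conn: &Connection) -> Result<Option<String>> {
        // Prepared once via the statement cache, executed fresh on each call so the
        // ORDER BY re-ranks against whatever is still mineable.
        conn.prepare_cached("SELECT txid FROM mempool ORDER BY fee_rate DESC LIMIT 1")?
            .query_row([], |row| row.get(0))
            .optional()
    }

Each pass must make progress between calls (delete the candidate row, or advance the address nonce that feeds the ranking), otherwise the same top-ranked transaction is returned forever.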
From a56baef832146df342a997e6a19f8126931fc22c Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Tue, 17 Dec 2024 22:28:15 -0600 Subject: [PATCH 013/238] test: success --- .../stacks/tests/block_construction.rs | 212 +++++++++++------- stackslib/src/chainstate/stacks/tests/mod.rs | 31 +++ stackslib/src/core/mempool.rs | 4 +- 3 files changed, 166 insertions(+), 81 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index c29276613ec..05583f85ba3 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5091,8 +5091,8 @@ fn paramaterized_mempool_walk_test( #[test] /// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees. -fn mempool_walk_test_nonce_filtered_and_ranked() { - let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..3) +fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { + let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..6) .map(|_user_index| { let privk = StacksPrivateKey::new(); let addr = StacksAddress::from_public_keys( @@ -5105,17 +5105,19 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { (privk, addr) }) .collect(); - let origin_addresses: Vec = key_address_pairs + let accounts: Vec = key_address_pairs .iter() .map(|(_, b)| b.to_string()) .collect(); - let address_0 = origin_addresses[0].to_string(); - let address_1 = origin_addresses[1].to_string(); - let address_2 = origin_addresses[2].to_string(); + let address_0 = accounts[0].to_string(); + let address_1 = accounts[1].to_string(); + let address_2 = accounts[2].to_string(); + let address_3 = accounts[3].to_string(); + let address_4 = accounts[4].to_string(); + let address_5 = accounts[5].to_string(); let test_name = function_name!(); let mut peer_config = TestPeerConfig::new(&test_name, 0, 0); - peer_config.initial_balances = vec![]; for (privk, addr) in &key_address_pairs { peer_config @@ -5144,71 +5146,112 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { let mut tx_events = Vec::new(); - // Submit nonces 0 through 9 for each of the 3 senders. - for nonce in 0..10 { - for user_index in 0..3 { - let mut tx = make_user_stacks_transfer( - &key_address_pairs[user_index].0, - nonce as u64, + // Simulate next possible nonces for all addresses + let mempool_tx = mempool.tx_begin().unwrap(); + mempool_tx + .execute( + "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)", + params![address_0, 2, address_1, 1, address_2, 6, address_3, 0, address_4, 1, address_5, 0], + ) + .unwrap(); + mempool_tx.commit().unwrap(); + + // Test vectors with a wide variety of origin/sponsor configurations and fee rate values. Some transactions do not have a + // sponsor, some others do, some others are sponsored by other sponsors. All in flight at the same time. 
+ // + // tuple shape -> (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) + let test_vectors = vec![ + (0, 0, 0, 0, 100.0), // Old origin nonce - ignored + (0, 1, 0, 1, 200.0), // Old origin nonce - ignored + (0, 2, 0, 2, 300.0), + (0, 3, 0, 3, 400.0), + (0, 4, 3, 0, 500.0), + (1, 0, 1, 0, 400.0), // Old origin nonce - ignored + (1, 1, 3, 1, 600.0), + (1, 2, 3, 2, 700.0), + (1, 3, 3, 3, 800.0), + (1, 4, 1, 4, 1200.0), + (2, 3, 2, 3, 9000.0), // Old origin nonce - ignored + (2, 4, 2, 4, 9000.0), // Old origin nonce - ignored + (2, 5, 2, 5, 9000.0), // Old origin nonce - ignored + (2, 6, 4, 0, 900.0), // Old sponsor nonce - ignored + (2, 6, 4, 1, 1000.0), + (2, 7, 4, 2, 800.0), + (2, 8, 2, 8, 1000.0), + (2, 9, 3, 5, 1000.0), + (2, 10, 3, 6, 1500.0), + (3, 4, 3, 4, 100.0), + (4, 3, 5, 2, 500.0), + (5, 0, 5, 0, 500.0), + (5, 1, 5, 1, 500.0), + (5, 3, 4, 4, 2000.0), + (5, 4, 4, 5, 2000.0), + ]; + for (origin_index, origin_nonce, sponsor_index, sponsor_nonce, fee_rate) in + test_vectors.into_iter() + { + let mut tx = if origin_index != sponsor_index { + let payload = TransactionPayload::TokenTransfer( + recipient.to_account_principal(), + 1, + TokenTransferMemo([0; 34]), + ); + sign_sponsored_singlesig_tx( + payload.into(), + &key_address_pairs[origin_index].0, + &key_address_pairs[sponsor_index].0, + origin_nonce, + sponsor_nonce, + 200, + ) + } else { + make_user_stacks_transfer( + &key_address_pairs[origin_index].0, + origin_nonce, 200, &recipient.to_account_principal(), 1, - ); - - let mut mempool_tx = mempool.tx_begin().unwrap(); - - let origin_address = tx.origin_address(); - let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); - - tx.set_tx_fee(100); - let txid = tx.txid(); - let tx_bytes = tx.serialize_to_vec(); - let tx_fee = tx.get_tx_fee(); - let height = 100; + ) + }; + + let mut mempool_tx = mempool.tx_begin().unwrap(); + + let origin_address = tx.origin_address(); + let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); + + tx.set_tx_fee(fee_rate as u64); + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_1.0, + &b_1.1, + true, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + origin_nonce, + &sponsor_address, + sponsor_nonce, + None, + ) + .unwrap(); - MemPoolDB::try_add_tx( - &mut mempool_tx, - &mut chainstate, - &b_1.0, - &b_1.1, - true, - txid, - tx_bytes, - tx_fee, - height, - &origin_address, - nonce.try_into().unwrap(), - &sponsor_address, - nonce.try_into().unwrap(), - None, + mempool_tx + .execute( + "UPDATE mempool SET fee_rate = ? WHERE txid = ?", + params![Some(fee_rate), &txid], ) .unwrap(); - - // Increase the `fee_rate` as nonce goes up, so we can test that next nonces get confirmed before higher fee txs. - // Also slightly increase the fee for some addresses so we can check those txs get selected first. - mempool_tx - .execute( - "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", - params![Some(100.0 * (nonce + 1 + user_index) as f64), &txid], - ) - .unwrap(); - mempool_tx.commit().unwrap(); - } + mempool_tx.commit().unwrap(); } - // Simulate next possible nonces for the 3 addresses: - // Address 0 => 2 - // Address 1 => 7 - // Address 2 => 9 - let mempool_tx = mempool.tx_begin().unwrap(); - mempool_tx - .execute( - "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?)", - params![address_0, 2, address_1, 7, address_2, 9], - ) - .unwrap(); - mempool_tx.commit().unwrap(); - // Visit transactions. Keep a record of the order of visited txs so we can compare at the end. let mut mempool_settings = MemPoolWalkSettings::default(); mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; @@ -5229,6 +5272,9 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { considered_txs.push(( available_tx.tx.metadata.origin_address.to_string(), available_tx.tx.metadata.origin_nonce, + available_tx.tx.metadata.sponsor_address.to_string(), + available_tx.tx.metadata.sponsor_nonce, + available_tx.tx.metadata.tx_fee, )); Ok(Some( // Generate any success result @@ -5254,22 +5300,30 @@ fn mempool_walk_test_nonce_filtered_and_ranked() { } assert!(get_epoch_time_ms() < deadline, "test timed out"); } + + // Expected transaction consideration order, sorted by mineable first (next origin+sponsor nonces, highest fee). + let expected_tx_order = vec![ + (address_2.clone(), 6, address_4.clone(), 1, 1000), + (address_2.clone(), 7, address_4.clone(), 2, 800), + (address_2.clone(), 8, address_2.clone(), 8, 1000), + (address_5.clone(), 0, address_5.clone(), 0, 500), + (address_5.clone(), 1, address_5.clone(), 1, 500), + (address_4.clone(), 3, address_5.clone(), 2, 500), + (address_5.clone(), 3, address_4.clone(), 4, 2000), + (address_5.clone(), 4, address_4.clone(), 5, 2000), + (address_0.clone(), 2, address_0.clone(), 2, 300), + (address_0.clone(), 3, address_0.clone(), 3, 400), + (address_0.clone(), 4, address_3.clone(), 0, 500), + (address_1.clone(), 1, address_3.clone(), 1, 600), + (address_1.clone(), 2, address_3.clone(), 2, 700), + (address_1.clone(), 3, address_3.clone(), 3, 800), + (address_1.clone(), 4, address_1.clone(), 4, 1200), + (address_3.clone(), 4, address_3.clone(), 4, 100), + (address_2.clone(), 9, address_3.clone(), 5, 1000), + (address_2.clone(), 10, address_3.clone(), 6, 1500), + ]; assert_eq!( - considered_txs, - vec![ - (address_2.clone(), 9), // Highest fee for address 2, and 9 is the next nonce - (address_1.clone(), 7), - (address_1.clone(), 8), - (address_1.clone(), 9), // Highest fee for address 1, but have to confirm nonces 7 and 8 first - (address_0.clone(), 2), - (address_0.clone(), 3), - (address_0.clone(), 4), - (address_0.clone(), 5), - (address_0.clone(), 6), - (address_0.clone(), 7), - (address_0.clone(), 8), - (address_0.clone(), 9), // Highest fee for address 0, but have to confirm all other nonces first - ], + considered_txs, expected_tx_order, "Mempool should visit transactions in the correct order while ignoring past nonces", ); }, diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 9a6a84507ef..6e2ba7b4487 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -1396,6 +1396,37 @@ pub fn sign_standard_singlesig_tx( tx_signer.get_tx().unwrap() } +pub fn sign_sponsored_singlesig_tx( + payload: TransactionPayload, + origin: &StacksPrivateKey, + sponsor: &StacksPrivateKey, + origin_nonce: u64, + 
sponsor_nonce: u64, + tx_fee: u64, +) -> StacksTransaction { + let mut origin_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(origin)) + .expect("Failed to create p2pkh spending condition from public key."); + origin_spending_condition.set_nonce(origin_nonce); + origin_spending_condition.set_tx_fee(tx_fee); + let mut sponsored_spending_condition = + TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sponsor)) + .expect("Failed to create p2pkh spending condition from public key."); + sponsored_spending_condition.set_nonce(sponsor_nonce); + sponsored_spending_condition.set_tx_fee(tx_fee); + let auth = TransactionAuth::Sponsored(origin_spending_condition, sponsored_spending_condition); + let mut unsigned_tx = StacksTransaction::new(TransactionVersion::Testnet, auth, payload); + + unsigned_tx.chain_id = 0x80000000; + unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; + + let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); + tx_signer.sign_origin(origin).unwrap(); + tx_signer.sign_sponsor(sponsor).unwrap(); + + tx_signer.get_tx().unwrap() +} + pub fn get_stacks_account(peer: &mut TestPeer, addr: &PrincipalData) -> StacksAccount { let account = peer .with_db_state(|ref mut sortdb, ref mut chainstate, _, _| { diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index d9a462099ea..eded6a6416f 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1766,8 +1766,8 @@ impl MemPoolDB { FROM mempool AS m LEFT JOIN nonces AS no ON m.origin_address = no.address LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address - WHERE (no.address IS NULL OR m.origin_nonce >= no.nonce) - AND (ns.address IS NULL OR m.sponsor_nonce >= ns.nonce) + WHERE (no.address IS NULL OR m.origin_nonce = no.nonce) + AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce) ), address_nonce_ranked AS ( SELECT *, From a854f3b5ce2dd2b31508c5dcfd96cf34c0757a26 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Tue, 17 Dec 2024 23:02:48 -0600 Subject: [PATCH 014/238] test: missing nonces from table --- .../stacks/tests/block_construction.rs | 29 ++++++++++--------- stackslib/src/core/mempool.rs | 6 ++-- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 05583f85ba3..cb99b8c7f26 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5092,7 +5092,7 @@ fn paramaterized_mempool_walk_test( #[test] /// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees. 
fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() {
-    let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..6)
+    let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..7)
         .map(|_user_index| {
             let privk = StacksPrivateKey::new();
             let addr = StacksAddress::from_public_keys(
@@ -5115,6 +5115,7 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() {
     let address_3 = accounts[3].to_string();
     let address_4 = accounts[4].to_string();
     let address_5 = accounts[5].to_string();
+    let address_6 = accounts[6].to_string();
 
     let test_name = function_name!();
     let mut peer_config = TestPeerConfig::new(&test_name, 0, 0);
@@ -5146,26 +5147,27 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() {
 
     let mut tx_events = Vec::new();
 
-    // Simulate next possible nonces for all addresses
+    // Simulate next possible nonces for **some** addresses. Leave some out so we can test the case where the nonce cannot be
+    // found in the db table and has to be pulled from the MARF.
     let mempool_tx = mempool.tx_begin().unwrap();
     mempool_tx
         .execute(
-            "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)",
-            params![address_0, 2, address_1, 1, address_2, 6, address_3, 0, address_4, 1, address_5, 0],
+            "INSERT INTO nonces (address, nonce) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)",
+            params![address_0, 2, address_1, 1, address_2, 6, address_4, 1, address_5, 0],
         )
         .unwrap();
     mempool_tx.commit().unwrap();
 
-    // Test vectors with a wide variety of origin/sponsor configurations and fee rate values. Some transactions do not have a
-    // sponsor, some others do, some others are sponsored by other sponsors. All in flight at the same time.
+    // Test transactions with a wide variety of origin/sponsor configurations and fee rate values. Some transactions do not have a
+    // sponsor, some others do, and some others are sponsored by other sponsors. All will be in flight at the same time.
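+    // A transaction only counts as mineable when BOTH its origin nonce and its sponsor nonce are the next expected
+    // ones: e.g. the (2, 6, 4, 0, 900.0) vector below is skipped because address_4's next sponsor nonce is 1, even
+    // though nonce 6 is the next origin nonce for address_2.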
// - // tuple shape -> (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) + // tuple shape: (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) let test_vectors = vec![ (0, 0, 0, 0, 100.0), // Old origin nonce - ignored (0, 1, 0, 1, 200.0), // Old origin nonce - ignored (0, 2, 0, 2, 300.0), (0, 3, 0, 3, 400.0), - (0, 4, 3, 0, 500.0), + (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF (1, 0, 1, 0, 400.0), // Old origin nonce - ignored (1, 1, 3, 1, 600.0), (1, 2, 3, 2, 700.0), @@ -5186,10 +5188,12 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { (5, 1, 5, 1, 500.0), (5, 3, 4, 4, 2000.0), (5, 4, 4, 5, 2000.0), + (6, 2, 6, 2, 1000.0), // Address has nonce 0 in MARF - ignored ]; for (origin_index, origin_nonce, sponsor_index, sponsor_nonce, fee_rate) in test_vectors.into_iter() { + // Create tx, either standard or sponsored let mut tx = if origin_index != sponsor_index { let payload = TransactionPayload::TokenTransfer( recipient.to_account_principal(), @@ -5218,13 +5222,11 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { let origin_address = tx.origin_address(); let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); - tx.set_tx_fee(fee_rate as u64); let txid = tx.txid(); let tx_bytes = tx.serialize_to_vec(); let tx_fee = tx.get_tx_fee(); let height = 100; - MemPoolDB::try_add_tx( &mut mempool_tx, &mut chainstate, @@ -5242,17 +5244,18 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { None, ) .unwrap(); - mempool_tx .execute( "UPDATE mempool SET fee_rate = ? WHERE txid = ?", params![Some(fee_rate), &txid], ) .unwrap(); + mempool_tx.commit().unwrap(); } - // Visit transactions. Keep a record of the order of visited txs so we can compare at the end. + // Visit transactions using the `NextNonceWithHighestFeeRate` strategy. Keep a record of the order of visits so we can compare + // at the end. let mut mempool_settings = MemPoolWalkSettings::default(); mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; let mut considered_txs = vec![]; @@ -5261,7 +5264,6 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { &TEST_BURN_STATE_DB, &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), |clarity_conn| { - // When the candidate cache fills, one pass cannot process all transactions loop { if mempool .iterate_candidates::<_, ChainstateError, _>( @@ -5302,6 +5304,7 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { } // Expected transaction consideration order, sorted by mineable first (next origin+sponsor nonces, highest fee). + // Ignores old and very future nonces. let expected_tx_order = vec![ (address_2.clone(), 6, address_4.clone(), 1, 1000), (address_2.clone(), 7, address_4.clone(), 2, 800), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index eded6a6416f..2b3c0bfb595 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1743,8 +1743,8 @@ impl MemPoolDB { // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: // - // 1. Filters out transactions that have nonces smaller than the origin and sponsor address' next expected nonce as stated - // in the `nonces` table, when possible + // 1. 
Filters out transactions to consider only those that have the next expected nonce for both the origin and sponsor, + // when possible // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable // order @@ -1842,8 +1842,6 @@ impl MemPoolDB { } } MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { - // Execute the query to get a single row. We do not use an iterator because we want the top rank to be - // recalculated every time we visit a transaction. match query_stmt_nonce_rank .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))? From 07cf97f2889ef11d0534870116f64db4edb291ea Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Wed, 18 Dec 2024 10:41:30 -0600 Subject: [PATCH 015/238] style: fixes --- .../stacks/tests/block_construction.rs | 10 +++++----- stackslib/src/core/mempool.rs | 16 ++++++++++------ 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index cb99b8c7f26..3be28946698 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5163,12 +5163,12 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { // // tuple shape: (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) let test_vectors = vec![ - (0, 0, 0, 0, 100.0), // Old origin nonce - ignored - (0, 1, 0, 1, 200.0), // Old origin nonce - ignored + (0, 0, 0, 0, 100.0), // Old origin nonce - ignored + (0, 1, 0, 1, 200.0), // Old origin nonce - ignored (0, 2, 0, 2, 300.0), (0, 3, 0, 3, 400.0), - (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF - (1, 0, 1, 0, 400.0), // Old origin nonce - ignored + (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF + (1, 0, 1, 0, 400.0), // Old origin nonce - ignored (1, 1, 3, 1, 600.0), (1, 2, 3, 2, 700.0), (1, 3, 3, 3, 800.0), @@ -5176,7 +5176,7 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { (2, 3, 2, 3, 9000.0), // Old origin nonce - ignored (2, 4, 2, 4, 9000.0), // Old origin nonce - ignored (2, 5, 2, 5, 9000.0), // Old origin nonce - ignored - (2, 6, 4, 0, 900.0), // Old sponsor nonce - ignored + (2, 6, 4, 0, 900.0), // Old sponsor nonce - ignored (2, 6, 4, 1, 1000.0), (2, 7, 4, 2, 800.0), (2, 8, 2, 8, 1000.0), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 2b3c0bfb595..2e5fc95bfc4 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1767,7 +1767,7 @@ impl MemPoolDB { LEFT JOIN nonces AS no ON m.origin_address = no.address LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address WHERE (no.address IS NULL OR m.origin_nonce = no.nonce) - AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce) + AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce) ), address_nonce_ranked AS ( SELECT *, @@ -1808,8 +1808,8 @@ impl MemPoolDB { // When the retry list is empty, read from the mempool db depending on the configured miner strategy match settings.strategy { MemPoolWalkStrategy::GlobalFeeRate => { - let start_with_no_estimate = - tx_consideration_sampler.sample(&mut rng) < settings.consider_no_estimate_tx_prob; + let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) 
+ < settings.consider_no_estimate_tx_prob; // randomly select from either the null fee-rate transactions or those with fee-rate estimates. let opt_tx = if start_with_no_estimate { null_iterator @@ -1819,11 +1819,15 @@ impl MemPoolDB { fee_iterator.next().map_err(|err| Error::SqliteError(err))? }; match opt_tx { - Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), + Some(row) => { + (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate) + } None => { // If the selected iterator is empty, check the other match if start_with_no_estimate { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? + fee_iterator + .next() + .map_err(|err| Error::SqliteError(err))? } else { null_iterator .next() @@ -1852,7 +1856,7 @@ impl MemPoolDB { let tx = MemPoolTxInfoPartial::from_row(row)?; let update_estimate = tx.fee_rate.is_none(); (tx, update_estimate) - }, + } None => { debug!("No more transactions to consider in mempool"); break MempoolIterationStopReason::NoMoreCandidates; From 0b0b821936b93c64d900c6c3e00f4af1aa49e679 Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Wed, 18 Dec 2024 10:46:47 -0600 Subject: [PATCH 016/238] style: error transforms --- .../stacks/tests/block_construction.rs | 8 +++---- stackslib/src/core/mempool.rs | 22 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 3be28946698..79491afa9e9 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5163,12 +5163,12 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { // // tuple shape: (origin_address_index, origin_nonce, sponsor_address_index, sponsor_nonce, fee_rate) let test_vectors = vec![ - (0, 0, 0, 0, 100.0), // Old origin nonce - ignored - (0, 1, 0, 1, 200.0), // Old origin nonce - ignored + (0, 0, 0, 0, 100.0), // Old origin nonce - ignored + (0, 1, 0, 1, 200.0), // Old origin nonce - ignored (0, 2, 0, 2, 300.0), (0, 3, 0, 3, 400.0), - (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF - (1, 0, 1, 0, 400.0), // Old origin nonce - ignored + (0, 4, 3, 0, 500.0), // Nonce 0 for address 3 is not in the table but will be valid on MARF + (1, 0, 1, 0, 400.0), // Old origin nonce - ignored (1, 1, 3, 1, 600.0), (1, 2, 3, 2, 700.0), (1, 3, 3, 3, 800.0), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 2e5fc95bfc4..2f56d10969e 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1720,10 +1720,10 @@ impl MemPoolDB { let mut query_stmt_null = self .db .prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM mempool @@ -1733,10 +1733,10 @@ impl MemPoolDB { let mut query_stmt_fee = self .db .prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy // @@ -1789,7 +1789,7 @@ impl MemPoolDB { let mut query_stmt_nonce_rank = self .db 
.prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { @@ -1814,9 +1814,9 @@ impl MemPoolDB { let opt_tx = if start_with_no_estimate { null_iterator .next() - .map_err(|err| Error::SqliteError(err))? + .map_err(Error::SqliteError)? } else { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? + fee_iterator.next().map_err(Error::SqliteError)? }; match opt_tx { Some(row) => { @@ -1827,11 +1827,11 @@ impl MemPoolDB { match if start_with_no_estimate { fee_iterator .next() - .map_err(|err| Error::SqliteError(err))? + .map_err(Error::SqliteError)? } else { null_iterator .next() - .map_err(|err| Error::SqliteError(err))? + .map_err(Error::SqliteError)? } { Some(row) => ( MemPoolTxInfoPartial::from_row(row)?, @@ -1848,9 +1848,9 @@ impl MemPoolDB { MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { match query_stmt_nonce_rank .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))? + .map_err(Error::SqliteError)? .next() - .map_err(|err| Error::SqliteError(err))? + .map_err(Error::SqliteError)? { Some(row) => { let tx = MemPoolTxInfoPartial::from_row(row)?; From 635cfe3a6f38282f7728946a2d97db84521481da Mon Sep 17 00:00:00 2001 From: Rafael Cardenas Date: Wed, 18 Dec 2024 19:00:13 -0600 Subject: [PATCH 017/238] fix: style --- stackslib/src/config/mod.rs | 15 +++------------ stackslib/src/core/mempool.rs | 27 ++++++--------------------- 2 files changed, 9 insertions(+), 33 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index c8a33485033..010ecc16fd8 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2676,18 +2676,9 @@ impl MinerConfigFile { activated_vrf_key_path: self.activated_vrf_key_path.clone(), fast_rampup: self.fast_rampup.unwrap_or(miner_default_config.fast_rampup), underperform_stop_threshold: self.underperform_stop_threshold, - mempool_walk_strategy: { - if let Some(mempool_walk_strategy) = &self.mempool_walk_strategy { - match str::parse(&mempool_walk_strategy) { - Ok(strategy) => strategy, - Err(e) => { - panic!("could not parse '{mempool_walk_strategy}': {e}"); - }, - } - } else { - MemPoolWalkStrategy::GlobalFeeRate - } - }, + mempool_walk_strategy: self.mempool_walk_strategy + .map(|s| str::parse(&s).unwrap_or_else(|e| panic!("Could not parse '{s}': {e}"))) + .unwrap_or(MemPoolWalkStrategy::GlobalFeeRate), txs_to_consider: { if let Some(txs_to_consider) = &self.txs_to_consider { txs_to_consider diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 2f56d10969e..066c8ba2ac5 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1717,10 +1717,7 @@ impl MemPoolDB { FROM mempool WHERE fee_rate IS NULL "; - let mut query_stmt_null = self - .db - .prepare(&sql) - .map_err(Error::SqliteError)?; + let mut query_stmt_null = self.db.prepare(&sql).map_err(Error::SqliteError)?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) .map_err(Error::SqliteError)?; @@ -1730,10 +1727,7 @@ impl MemPoolDB { WHERE fee_rate IS NOT NULL ORDER BY fee_rate DESC "; - let mut query_stmt_fee = self - .db - .prepare(&sql) - .map_err(Error::SqliteError)?; + let mut query_stmt_fee = self.db.prepare(&sql).map_err(Error::SqliteError)?; let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) .map_err(Error::SqliteError)?; @@ -1786,10 +1780,7 @@ impl MemPoolDB { ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC LIMIT 1 "; - let mut 
query_stmt_nonce_rank = self - .db - .prepare(&sql) - .map_err(Error::SqliteError)?; + let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { @@ -1812,9 +1803,7 @@ impl MemPoolDB { < settings.consider_no_estimate_tx_prob; // randomly select from either the null fee-rate transactions or those with fee-rate estimates. let opt_tx = if start_with_no_estimate { - null_iterator - .next() - .map_err(Error::SqliteError)? + null_iterator.next().map_err(Error::SqliteError)? } else { fee_iterator.next().map_err(Error::SqliteError)? }; @@ -1825,13 +1814,9 @@ impl MemPoolDB { None => { // If the selected iterator is empty, check the other match if start_with_no_estimate { - fee_iterator - .next() - .map_err(Error::SqliteError)? + fee_iterator.next().map_err(Error::SqliteError)? } else { - null_iterator - .next() - .map_err(Error::SqliteError)? + null_iterator.next().map_err(Error::SqliteError)? } { Some(row) => ( MemPoolTxInfoPartial::from_row(row)?, From 0f68ea7b8d9ce0c1978fc356e3f8b9dff27eb8fb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 13 Feb 2025 11:03:35 -0800 Subject: [PATCH 018/238] feat: skip sending pending payload if URL doesnt match --- testnet/stacks-node/src/event_dispatcher.rs | 67 +++++++++++++++++++-- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 2203a8c5521..4b16ea61e84 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -425,7 +425,7 @@ impl EventObserver { Ok(()) } - fn process_pending_payloads(conn: &Connection) { + fn process_pending_payloads(&self, conn: &Connection) { let pending_payloads = match Self::get_pending_payloads(conn) { Ok(payloads) => payloads, Err(e) => { @@ -438,6 +438,10 @@ impl EventObserver { }; for (id, url, payload, timeout_ms) in pending_payloads { + // If the URL is not the same as the endpoint, skip it + if !url.starts_with(&self.endpoint) { + continue; + } let timeout = Duration::from_millis(timeout_ms); Self::send_payload_directly(&payload, &url, timeout); @@ -563,7 +567,7 @@ impl EventObserver { Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout); // Process all pending payloads - Self::process_pending_payloads(&conn); + self.process_pending_payloads(&conn); } else { // No database, just send the payload Self::send_payload_directly(payload, &full_url, self.timeout); @@ -2042,16 +2046,19 @@ mod test { use mockito::Matcher; let dir = tempdir().unwrap(); - let db_path = dir.path().join("test_process_payloads.sqlite"); + let db_path = dir.path().join("event_observers.sqlite"); let db_path_str = db_path.to_str().unwrap(); + let mut server = mockito::Server::new(); + let endpoint = server.url().to_string(); + let timeout = Duration::from_secs(5); + let observer = + EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); let payload = json!({"key": "value"}); let timeout = Duration::from_secs(5); - // Create a mock server - let mut server = mockito::Server::new(); let _m = server .mock("POST", "/api") .match_header("content-type", Matcher::Regex("application/json.*".into())) @@ -2068,7 +2075,7 @@ mod test { .expect("Failed to insert payload"); // Process pending payloads - EventObserver::process_pending_payloads(&conn); + 
observer.process_pending_payloads(&conn); // Verify that the pending payloads list is empty let pending_payloads = @@ -2079,6 +2086,54 @@ mod test { _m.assert(); } + #[test] + fn pending_payloads_are_skipped_if_url_does_not_match() { + let dir = tempdir().unwrap(); + let db_path = dir.path().join("event_observers.sqlite"); + let db_path_str = db_path.to_str().unwrap(); + + let mut server = mockito::Server::new(); + let endpoint = server.url().to_string(); + let timeout = Duration::from_secs(5); + let observer = + EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); + + let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + + let payload = json!({"key": "value"}); + let timeout = Duration::from_secs(5); + + let mock = server + .mock("POST", "/api") + .match_header( + "content-type", + mockito::Matcher::Regex("application/json.*".into()), + ) + .match_body(mockito::Matcher::Json(payload.clone())) + .with_status(200) + .expect(0) // Expect 0 calls to this endpoint + .create(); + + // Use a different URL than the observer's endpoint + let url = "http://different-domain.com/api"; + + EventObserver::insert_payload(&conn, url, &payload, timeout) + .expect("Failed to insert payload"); + + observer.process_pending_payloads(&conn); + + let pending_payloads = + EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + // Verify that the pending payload is still in the database + assert_eq!( + pending_payloads.len(), + 1, + "Expected payload to remain in database since URL didn't match" + ); + + mock.assert(); + } + #[test] fn test_new_event_observer_with_db() { let dir = tempdir().unwrap(); From 734001ffface1d944664b999af31d62341ad3a56 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 13 Feb 2025 11:12:17 -0800 Subject: [PATCH 019/238] chore: changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cc57f6d80c..a4439caa963 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Logging improvements: - P2P logs now includes a reason for dropping a peer or neighbor - Improvements to how a PeerAddress is logged (human readable format vs hex) +- Pending event dispatcher requests will no longer be sent to URLs that are no longer registered as event observers ([#5834](https://github.com/stacks-network/stacks-core/pull/5834)) ### Fixed From c533237f9715fa56456b6d17daf6bb85941445d1 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Fri, 14 Feb 2025 11:24:35 +0000 Subject: [PATCH 020/238] Add `flake.nix` --- flake.lock | 101 ++++++++++++++++++++++++++++ flake.nix | 190 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 flake.lock create mode 100644 flake.nix diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000000..3284405275f --- /dev/null +++ b/flake.lock @@ -0,0 +1,101 @@ +{ + "nodes": { + "crane": { + "locked": { + "lastModified": 1739053031, + "narHash": "sha256-LrMDRuwAlRFD2T4MgBSRd1s2VtOE+Vl1oMCNu3RpPE0=", + "owner": "ipetkov", + "repo": "crane", + "rev": "112e6591b2d6313b1bd05a80a754a8ee42432a7e", + "type": "github" + }, + "original": { + "owner": "ipetkov", + "repo": "crane", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": [ + "systems" + ] + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + 
"owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1739419412, + "narHash": "sha256-NCWZQg4DbYVFWg+MOFrxWRaVsLA7yvRWAf6o0xPR1hI=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "2d55b4c1531187926c2a423f6940b3b1301399b5", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "crane": "crane", + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "rust-overlay": "rust-overlay", + "systems": "systems" + } + }, + "rust-overlay": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1739413688, + "narHash": "sha256-57OAXXYhOibG7Rqhhr4ecI1H8mtDJB2uj0T8rbQVGLY=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "675a6427d505f140dab8c56379afb66d4f55800b", + "type": "github" + }, + "original": { + "owner": "oxalica", + "repo": "rust-overlay", + "type": "github" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000000..ea94db3105b --- /dev/null +++ b/flake.nix @@ -0,0 +1,190 @@ +{ + description = "stacks-core"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + systems.url = "github:nix-systems/default"; + + flake-utils = { + url = "github:numtide/flake-utils"; + inputs.systems.follows = "systems"; + }; + + rust-overlay = { + url = "github:oxalica/rust-overlay"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + + crane = { + url = "github:ipetkov/crane"; + }; + + }; + + outputs = + { + nixpkgs, + flake-utils, + rust-overlay, + crane, + ... + }: + flake-utils.lib.eachDefaultSystem ( + system: + let + overlays = [ (import rust-overlay) ]; + pkgs = import nixpkgs { + inherit system overlays; + }; + + inherit (pkgs) lib; + + toolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain; + craneLib = (crane.mkLib pkgs).overrideToolchain toolchain; + + name = "stacks-core"; + + versions = (builtins.fromTOML (builtins.readFile ./versions.toml)); + version = versions.stacks_node_version; + + # Common arguments can be set here to avoid repeating them later + commonArgs = { + strictDeps = true; + + buildInputs = + [ + # Add additional build inputs here + ] + ++ lib.optionals pkgs.stdenv.isDarwin [ + # Darwin specific inputs + pkgs.darwin.apple_sdk.frameworks.SystemConfiguration + ]; + }; + + # Build *just* the cargo dependencies, so we can reuse + # all of that work (e.g. 
via cachix) when running in CI + cargoArtifacts = craneLib.buildDepsOnly ( + commonArgs + // { + inherit version; + pname = name; + src = fileSetForCrate ./.; + } + ); + + individualCrateArgs = commonArgs // { + inherit cargoArtifacts; + + # NB: we disable tests since we'll run them all via cargo-nextest + doCheck = false; + }; + + # TODO: Return minimum fileSets per each crate + fileSetForCrate = + crate: + lib.fileset.toSource { + root = ./.; + fileset = lib.fileset.unions [ + ./Cargo.toml + ./Cargo.lock + # + ./versions.toml + # + ./stx-genesis/name_zonefiles.txt + ./stx-genesis/name_zonefiles.txt.sha256 + ./stx-genesis/name_zonefiles-test.txt + ./stx-genesis/name_zonefiles-test.txt.sha256 + ./stx-genesis/chainstate.txt + ./stx-genesis/chainstate.txt.sha256 + ./stx-genesis/chainstate-test.txt + ./stx-genesis/chainstate-test.txt.sha256 + # + (craneLib.fileset.commonCargoSources crate) + # + (lib.fileset.fileFilter (file: file.hasExt "clar") ./.) + # + (craneLib.fileset.commonCargoSources ./clarity) + (craneLib.fileset.commonCargoSources ./contrib/tools/relay-server) + (craneLib.fileset.commonCargoSources ./libsigner) + (craneLib.fileset.commonCargoSources ./libstackerdb) + (craneLib.fileset.commonCargoSources ./pox-locking) + (craneLib.fileset.commonCargoSources ./stacks-common) + (craneLib.fileset.commonCargoSources ./stackslib) + (craneLib.fileset.commonCargoSources ./stx-genesis) + (craneLib.fileset.commonCargoSources ./testnet/stacks-node) + ]; + }; + + stacks-signer = craneLib.buildPackage ( + individualCrateArgs + // rec { + version = versions.stacks_signer_version; + pname = "stacks-signer"; + cargoFeatures = "--features monitoring_prom"; + cargoExtraArgs = "${cargoFeatures} -p ${pname}"; + src = fileSetForCrate ./stacks-signer; + } + ); + + # Build the actual crate itself, reusing the dependency + # artifacts from above. 
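+      # For example (assuming flakes are enabled on the host), `nix build .`
+      # builds this crate as the flake's default package, and
+      # `nix run .#stacks-signer` runs the signer app exposed further down.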
+ stacks-core = craneLib.buildPackage ( + commonArgs + // rec { + inherit version cargoArtifacts; + doCheck = false; + pname = name; + cargoFeatures = "--features monitoring_prom,slog_json"; + cargoExtraArgs = "${cargoFeatures}"; + src = fileSetForCrate ./.; + } + ); + in + with pkgs; + { + packages = { + inherit stacks-signer; + default = stacks-core; + }; + + apps = rec { + stacks-node = { + type = "app"; + program = "${stacks-core}/bin/stacks-node"; + }; + stacks-signer = { + type = "app"; + program = "${stacks-signer}/bin/stacks-signer"; + }; + default = stacks-node; + }; + + checks = { + inherit stacks-core; + }; + + devShells.default = craneLib.devShell { + RUSTFMT = "${toolchain}/bin/rustfmt"; + GREETING = "Welcome, stacks-core developer!"; + shellHook = '' + echo $GREETING + + echo "Setting a few options that will help you when running tests:" + set -x + ulimit -n 10240 + set +x + ''; + + packages = + [ + rust-analyzer + bitcoind + ] + ++ lib.optionals pkgs.stdenv.isDarwin [ + pkgs.darwin.apple_sdk.frameworks.SystemConfiguration + pkgs.darwin.apple_sdk.frameworks.CoreServices + ]; + }; + } + ); +} From cdcaa581835ad37cbb23fd089f08397d1c2ad4ba Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 15 Feb 2025 12:05:16 +0100 Subject: [PATCH 021/238] added CheckErrors::ExecutionTimeExpired --- clarity/src/vm/analysis/errors.rs | 4 ++++ clarity/src/vm/contexts.rs | 3 +++ clarity/src/vm/mod.rs | 15 +++++++++++++++ clarity/src/vm/tests/simple_apply_eval.rs | 11 +++++++++++ 4 files changed, 33 insertions(+) diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 5c3f68c7f95..8708e85f250 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -194,6 +194,9 @@ pub enum CheckErrors { WriteAttemptedInReadOnly, AtBlockClosureMustBeReadOnly, + + // time checker errors + ExecutionTimeExpired, } #[derive(Debug, PartialEq)] @@ -466,6 +469,7 @@ impl DiagnosableError for CheckErrors { CheckErrors::UncheckedIntermediaryResponses => "intermediary responses in consecutive statements must be checked".into(), CheckErrors::CostComputationFailed(s) => format!("contract cost computation failed: {}", s), CheckErrors::CouldNotDetermineSerializationType => "could not determine the input type for the serialization function".into(), + CheckErrors::ExecutionTimeExpired => "execution time expired".into(), } } diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index a9779e96e6a..b0f1995e8b9 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -17,6 +17,7 @@ use std::collections::BTreeMap; use std::fmt; use std::mem::replace; +use std::time::Instant; use hashbrown::{HashMap, HashSet}; use serde::Serialize; @@ -199,6 +200,7 @@ pub struct GlobalContext<'a, 'hooks> { /// This is the chain ID of the transaction pub chain_id: u32, pub eval_hooks: Option>, + pub execution_time_tracker: Instant, } #[derive(Serialize, Deserialize, Clone)] @@ -1544,6 +1546,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { epoch_id, chain_id, eval_hooks: None, + execution_time_tracker: Instant::now(), } } diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 82c9b5a4db7..4d0477f98a5 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -312,6 +312,21 @@ pub fn eval( Atom, AtomValue, Field, List, LiteralValue, TraitReference, }; + if env + .global_context + .execution_time_tracker + .elapsed() + .as_secs() + > 1 + { + warn!( + "ExecutionTime expired while running {:?} ({:?})", + exp, + 
env.global_context.execution_time_tracker.elapsed()
+        );
+        return Err(CheckErrors::ExecutionTimeExpired.into());
+    }
+
     if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() {
         for hook in eval_hooks.iter_mut() {
             hook.will_begin_eval(env, context, exp);
diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs
index ceeb7f9ddb5..eb9c89906a1 100644
--- a/clarity/src/vm/tests/simple_apply_eval.rs
+++ b/clarity/src/vm/tests/simple_apply_eval.rs
@@ -1763,3 +1763,14 @@ fn test_chain_id() {
         )
     });
 }
+
+#[test]
+fn test_execution_time_expiration() {
+    let mut program = String::from("(define-private (dummy (a uint) (b uint)) (+ a b)) (define-private (adder (a uint) (b uint)) (+ a (+ b (dummy a a)))) (* (adder u100000000000000000 (adder u20000000000000000000 u20000000000000000000)) u100) ");
+
+    for _ in 1..200000 {
+        program.push_str("(adder u100000000000000000 u9900000000000000000) ");
+    }
+
+    assert_eq!(vm_execute(&program).err().unwrap(), CheckErrors::ExecutionTimeExpired.into());
+}

From 3f61d022dc2efafbbb07317452177b6b392f6e44 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Mon, 17 Feb 2025 06:12:45 +0100
Subject: [PATCH 022/238] added extreme test

---
 clarity/src/vm/mod.rs                     |  12 +-
 clarity/src/vm/tests/simple_apply_eval.rs | 623 +++++++++++++++++++++-
 2 files changed, 622 insertions(+), 13 deletions(-)

diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs
index 4d0477f98a5..71e1527c5e3 100644
--- a/clarity/src/vm/mod.rs
+++ b/clarity/src/vm/mod.rs
@@ -54,6 +54,7 @@ pub mod test_util;
 pub mod clarity;
 
 use std::collections::BTreeMap;
+use std::time::Duration;
 
 use serde_json;
 use stacks_common::types::StacksEpochId;
@@ -86,6 +87,7 @@ use crate::vm::types::{PrincipalData, TypeSignature};
 pub use crate::vm::version::ClarityVersion;
 
 pub const MAX_CALL_STACK_DEPTH: usize = 64;
+pub const MAX_EXECUTION_TIME_SECS: u64 = 10;
 
 #[derive(Debug, Clone)]
 pub struct ParsedContract {
@@ -312,15 +314,11 @@ pub fn eval(
         Atom, AtomValue, Field, List, LiteralValue, TraitReference,
     };
 
-    if env
-        .global_context
-        .execution_time_tracker
-        .elapsed()
-        .as_secs()
-        > 1
+    if env.global_context.execution_time_tracker.elapsed()
+        > Duration::from_secs(MAX_EXECUTION_TIME_SECS)
     {
         warn!(
-            "ExecutionTime expired while running {:?} ({:?})",
+            "ExecutionTime expired while running {:?} ({:?} elapsed)",
             exp,
             env.global_context.execution_time_tracker.elapsed()
diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs
index eb9c89906a1..0b3669f06b3 100644
--- a/clarity/src/vm/tests/simple_apply_eval.rs
+++ b/clarity/src/vm/tests/simple_apply_eval.rs
@@ -1766,11 +1766,622 @@ fn test_chain_id() {
 
 #[test]
 fn test_execution_time_expiration() {
-    let mut program = String::from("(define-private (dummy (a uint) (b uint)) (+ a b)) (define-private (adder (a uint) (b uint)) (+ a (+ b (dummy a a)))) (* (adder u100000000000000000 (adder u20000000000000000000 u20000000000000000000)) u100) ");
-
-    for _ in 1..200000 {
-        program.push_str("(adder u100000000000000000 u9900000000000000000) ");
-    }
-
-    assert_eq!(vm_execute(&program).err().unwrap(), CheckErrors::ExecutionTimeExpired.into());
+    let program = String::from(
+        r#";; Block Limits
+;; {
+;; "read_count": 15_000,
+;; "read_length": 100_000_000,
+;; "runtime": 5_000_000_000,
+;; "write_count": 15_000,
+;; "write_length": 15_000_000,
+;; }
+
+(define-constant ERR_UNWRAP (err u101))
+
+;; Variables
+(define-data-var value-used-read-count uint u0)
+(define-data-var temp-list (list 2000 uint)
+  (list ))
+
+(define-map test-map uint uint)
+
+;; ;; Functions
+(define-private 
(initialize) + (begin + (var-set temp-list (list )) + (var-set value-used-read-count u0) + ) +) + +;; ;; Test read count limit +(define-private (read-count-one (current-number uint)) + (begin + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get 
value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) 
(var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get 
value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) 
(var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) + (ok true) + ) +) + +(define-public (read-count-test (current-numbers (list 1000 uint))) + (begin + (initialize) + (ok (map read-count-one current-numbers)) + ) +) + + +;; ;; Test read length limit +(define-private (read-length-one (position uint)) + (begin + (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) + (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) + (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) + (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get 
value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) + (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) + (ok true) + ) +) + +(define-public (read-length-test (current-numbers (list 1200 uint))) + (begin + (initialize) + (ok (map read-length-one current-numbers)) + ) +) + +;; Test write count limit +;; (define-private (write-count-one (current-number uint)) +;; (begin +;; (var-set value-used-read-count (+ (* current-number current-number) current-number)) +;; (var-set value-used-read-count (* u2 (var-get value-used-read-count))) +;; (var-set value-used-read-count u3) +;; ) +;; ) + +;; Test write count limit +(define-private (write-count-one (current-number uint)) + (begin + ;; Counts a write count as a read count as well + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; (var-set value-used-read-count u2) + ;; Counts a write count as a read count as well + ;; (map-set test-map current-number u1) + ;; (map-set test-map (+ current-number u1000) u2) + ;; (map-set test-map (+ current-number u2000) u3) + ;; (map-set test-map (+ current-number u3000) u4) + ;; (map-set test-map (+ current-number u4000) u5) + ;; (map-set test-map (+ current-number u5000) u6) + ;; (map-set test-map (+ current-number u6000) u7) + ;; (map-set test-map (+ current-number u7000) u8) + ;; (map-set test-map (+ current-number u8000) u9) + ;; (map-set test-map (+ current-number u9000) u10) + ;; (map-set test-map (+ current-number u10000) u11) + ;; (map-set test-map (+ current-number u11000) u12) + ;; (map-set test-map (+ current-number u12000) u13) + ;; (map-set test-map (+ current-number u13000) u14) + ;; (map-set test-map (+ current-number u14000) u15) + ;; Counts a write count as a read count as well + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? 
(append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) + (ok true) + ) +) + +(define-public (write-count-test (current-numbers (list 1000 uint))) + (begin + (initialize ) + (ok (map write-count-one current-numbers)) + ) +) + + +;; Test write length limit +(define-private (write-length-one (current-number uint)) + (begin + (var-set temp-list (unwrap! (as-max-len? (append (var-get temp-list) current-number) u2000) ERR_UNWRAP)) + (ok true) + ) +) + +(define-public (write-length-test (current-numbers (list 1000 uint))) + (begin + (initialize ) + ;; Chain multiple write operations + (map write-length-one current-numbers) + (map write-length-one current-numbers) + (map write-length-one current-numbers) + (map write-length-one current-numbers) + (map write-length-one current-numbers) + ;; Store final result + (var-set value-used-read-length (var-get temp-list)) + (ok true) + ) +) + +;; Test computation limit +(define-private (computation-one (x int) (y int)) + (begin + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* 
(/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + (+ + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) + ) + + y + ) +) + +(define-private (computation-three (x int) (y int)) + (+ (computation-one x y) (computation-one x y) (computation-one x y)) +) + +(define-public (computation-test (l (list 1000 int)) (init int)) + (begin + (initialize ) + (ok (fold computation-three l init)) + ) +) + +;; List of values +(define-data-var value-used-read-length (list 5000 uint) + (list + u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12 u13 u14 u15 u16 u17 u18 u19 + u20 u21 u22 u23 u24 u25 u26 u27 u28 u29 u30 u31 u32 u33 u34 u35 u36 u37 u38 u39 + u40 u41 u42 u43 u44 u45 u46 u47 u48 u49 u50 u51 u52 u53 u54 u55 u56 u57 u58 u59 + u60 u61 u62 u63 u64 u65 u66 u67 u68 u69 u70 u71 u72 u73 u74 u75 u76 u77 u78 u79 + u80 u81 u82 u83 u84 u85 u86 u87 u88 u89 u90 u91 u92 u93 u94 u95 u96 u97 u98 u99 + u100 u101 u102 u103 u104 u105 u106 u107 u108 u109 u110 u111 u112 u113 u114 u115 u116 u117 u118 u119 + u120 u121 u122 u123 u124 u125 u126 u127 u128 u129 u130 u131 u132 u133 u134 u135 u136 u137 u138 u139 + u140 u141 u142 u143 u144 u145 u146 u147 u148 u149 u150 u151 u152 u153 u154 u155 u156 u157 u158 u159 + u160 u161 u162 u163 u164 u165 u166 u167 u168 u169 u170 u171 u172 u173 u174 u175 u176 u177 u178 u179 + u180 u181 u182 u183 u184 u185 u186 u187 u188 u189 u190 u191 u192 u193 u194 u195 u196 u197 u198 u199 + u200 u201 u202 u203 u204 u205 u206 u207 u208 u209 u210 u211 u212 u213 u214 u215 u216 u217 u218 u219 + u220 u221 u222 u223 u224 u225 u226 u227 u228 u229 u230 u231 u232 u233 u234 u235 u236 u237 u238 u239 + u240 u241 u242 u243 u244 u245 u246 u247 u248 u249 u250 u251 u252 u253 u254 u255 u256 u257 u258 u259 + u260 u261 u262 u263 u264 u265 u266 u267 u268 u269 u270 u271 u272 u273 u274 u275 u276 u277 u278 u279 + u280 u281 u282 u283 u284 u285 u286 u287 u288 u289 u290 u291 u292 u293 u294 u295 u296 u297 u298 u299 + u300 u301 u302 u303 u304 u305 u306 u307 u308 u309 u310 u311 u312 u313 u314 u315 u316 u317 u318 u319 + u320 u321 u322 u323 u324 u325 u326 u327 u328 u329 u330 u331 u332 u333 u334 u335 u336 u337 u338 u339 + u340 u341 u342 u343 u344 u345 u346 u347 u348 u349 u350 u351 u352 u353 u354 u355 u356 u357 u358 u359 + u360 u361 u362 u363 u364 u365 u366 u367 u368 u369 u370 u371 u372 u373 u374 u375 u376 u377 u378 u379 + u380 u381 u382 u383 u384 u385 u386 u387 u388 u389 u390 u391 u392 u393 u394 u395 u396 u397 u398 u399 + u400 u401 u402 u403 u404 u405 u406 u407 u408 u409 u410 u411 u412 u413 u414 u415 u416 u417 u418 u419 + u420 u421 u422 u423 u424 u425 u426 u427 u428 u429 u430 u431 u432 u433 u434 u435 u436 u437 u438 u439 + u440 u441 u442 u443 u444 u445 u446 u447 u448 u449 u450 u451 u452 u453 u454 u455 u456 u457 u458 u459 + u460 u461 u462 u463 u464 u465 u466 u467 u468 u469 u470 u471 u472 u473 u474 u475 u476 u477 u478 u479 + u480 u481 u482 u483 u484 u485 u486 u487 u488 u489 u490 u491 u492 u493 u494 u495 u496 u497 u498 u499 + u500 u501 u502 u503 u504 u505 u506 u507 u508 u509 u510 u511 u512 u513 u514 u515 u516 u517 u518 u519 + u520 u521 u522 u523 u524 u525 u526 u527 u528 u529 u530 u531 u532 u533 u534 u535 u536 u537 u538 u539 + u540 u541 u542 u543 u544 u545 u546 u547 u548 u549 u550 u551 u552 u553 u554 u555 u556 u557 u558 u559 + u560 u561 u562 u563 u564 u565 u566 u567 u568 u569 u570 u571 u572 u573 u574 u575 u576 u577 u578 u579 + u580 u581 u582 u583 u584 u585 u586 u587 u588 u589 u590 u591 u592 u593 u594 u595 u596 u597 u598 u599 + u600 u601 u602 u603 u604 u605 u606 u607 u608 u609 u610 u611 u612 u613 u614 u615 u616 u617 u618 u619 + u620 
u621 u622 u623 u624 u625 u626 u627 u628 u629 u630 u631 u632 u633 u634 u635 u636 u637 u638 u639 + u640 u641 u642 u643 u644 u645 u646 u647 u648 u649 u650 u651 u652 u653 u654 u655 u656 u657 u658 u659 + u660 u661 u662 u663 u664 u665 u666 u667 u668 u669 u670 u671 u672 u673 u674 u675 u676 u677 u678 u679 + u680 u681 u682 u683 u684 u685 u686 u687 u688 u689 u690 u691 u692 u693 u694 u695 u696 u697 u698 u699 + u700 u701 u702 u703 u704 u705 u706 u707 u708 u709 u710 u711 u712 u713 u714 u715 u716 u717 u718 u719 + u720 u721 u722 u723 u724 u725 u726 u727 u728 u729 u730 u731 u732 u733 u734 u735 u736 u737 u738 u739 + u740 u741 u742 u743 u744 u745 u746 u747 u748 u749 u750 u751 u752 u753 u754 u755 u756 u757 u758 u759 + u760 u761 u762 u763 u764 u765 u766 u767 u768 u769 u770 u771 u772 u773 u774 u775 u776 u777 u778 u779 + u780 u781 u782 u783 u784 u785 u786 u787 u788 u789 u790 u791 u792 u793 u794 u795 u796 u797 u798 u799 + u800 u801 u802 u803 u804 u805 u806 u807 u808 u809 u810 u811 u812 u813 u814 u815 u816 u817 u818 u819 + u820 u821 u822 u823 u824 u825 u826 u827 u828 u829 u830 u831 u832 u833 u834 u835 u836 u837 u838 u839 + u840 u841 u842 u843 u844 u845 u846 u847 u848 u849 u850 u851 u852 u853 u854 u855 u856 u857 u858 u859 + u860 u861 u862 u863 u864 u865 u866 u867 u868 u869 u870 u871 u872 u873 u874 u875 u876 u877 u878 u879 + u880 u881 u882 u883 u884 u885 u886 u887 u888 u889 u890 u891 u892 u893 u894 u895 u896 u897 u898 u899 + u900 u901 u902 u903 u904 u905 u906 u907 u908 u909 u910 u911 u912 u913 u914 u915 u916 u917 u918 u919 + u920 u921 u922 u923 u924 u925 u926 u927 u928 u929 u930 u931 u932 u933 u934 u935 u936 u937 u938 u939 + u940 u941 u942 u943 u944 u945 u946 u947 u948 u949 u950 u951 u952 u953 u954 u955 u956 u957 u958 u959 + u960 u961 u962 u963 u964 u965 u966 u967 u968 u969 u970 u971 u972 u973 u974 u975 u976 u977 u978 u979 + u980 u981 u982 u983 u984 u985 u986 u987 u988 u989 u990 u991 u992 u993 u994 u995 u996 u997 u998 u999 + u1000 u1001 u1002 u1003 u1004 u1005 u1006 u1007 u1008 u1009 u1010 u1011 u1012 u1013 u1014 u1015 u1016 u1017 u1018 u1019 + u1020 u1021 u1022 u1023 u1024 u1025 u1026 u1027 u1028 u1029 u1030 u1031 u1032 u1033 u1034 u1035 u1036 u1037 u1038 u1039 + u1040 u1041 u1042 u1043 u1044 u1045 u1046 u1047 u1048 u1049 u1050 u1051 u1052 u1053 u1054 u1055 u1056 u1057 u1058 u1059 + u1060 u1061 u1062 u1063 u1064 u1065 u1066 u1067 u1068 u1069 u1070 u1071 u1072 u1073 u1074 u1075 u1076 u1077 u1078 u1079 + u1080 u1081 u1082 u1083 u1084 u1085 u1086 u1087 u1088 u1089 u1090 u1091 u1092 u1093 u1094 u1095 u1096 u1097 u1098 u1099 + u1100 u1101 u1102 u1103 u1104 u1105 u1106 u1107 u1108 u1109 u1110 u1111 u1112 u1113 u1114 u1115 u1116 u1117 u1118 u1119 + u1120 u1121 u1122 u1123 u1124 u1125 u1126 u1127 u1128 u1129 u1130 u1131 u1132 u1133 u1134 u1135 u1136 u1137 u1138 u1139 + u1140 u1141 u1142 u1143 u1144 u1145 u1146 u1147 u1148 u1149 u1150 u1151 u1152 u1153 u1154 u1155 u1156 u1157 u1158 u1159 + u1160 u1161 u1162 u1163 u1164 u1165 u1166 u1167 u1168 u1169 u1170 u1171 u1172 u1173 u1174 u1175 u1176 u1177 u1178 u1179 + u1180 u1181 u1182 u1183 u1184 u1185 u1186 u1187 u1188 u1189 u1190 u1191 u1192 u1193 u1194 u1195 u1196 u1197 u1198 u1199 + u1200 u1201 u1202 u1203 u1204 u1205 u1206 u1207 u1208 u1209 u1210 u1211 u1212 u1213 u1214 u1215 u1216 u1217 u1218 u1219 + u1220 u1221 u1222 u1223 u1224 u1225 u1226 u1227 u1228 u1229 u1230 u1231 u1232 u1233 u1234 u1235 u1236 u1237 u1238 u1239 + u1240 u1241 u1242 u1243 u1244 u1245 u1246 u1247 u1248 u1249 u1250 u1251 u1252 u1253 u1254 u1255 u1256 u1257 u1258 u1259 + u1260 u1261 u1262 u1263 u1264 u1265 
u1266 u1267 u1268 u1269 u1270 u1271 u1272 u1273 u1274 u1275 u1276 u1277 u1278 u1279
+ u1280 u1281 u1282 u1283 u1284 u1285 u1286 u1287 u1288 u1289 u1290 u1291 u1292 u1293 u1294 u1295 u1296 u1297 u1298 u1299
+ u1300 u1301 u1302 u1303 u1304 u1305 u1306 u1307 u1308 u1309 u1310 u1311 u1312 u1313 u1314 u1315 u1316 u1317 u1318 u1319
+ u1320 u1321 u1322 u1323 u1324 u1325 u1326 u1327 u1328 u1329 u1330 u1331 u1332 u1333 u1334 u1335 u1336 u1337 u1338 u1339
+ u1340 u1341 u1342 u1343 u1344 u1345 u1346 u1347 u1348 u1349 u1350 u1351 u1352 u1353 u1354 u1355 u1356 u1357 u1358 u1359
+ u1360 u1361 u1362 u1363 u1364 u1365 u1366 u1367 u1368 u1369 u1370 u1371 u1372 u1373 u1374 u1375 u1376 u1377 u1378 u1379
+ u1380 u1381 u1382 u1383 u1384 u1385 u1386 u1387 u1388 u1389 u1390 u1391 u1392 u1393 u1394 u1395 u1396 u1397 u1398 u1399
+ u1400 u1401 u1402 u1403 u1404 u1405 u1406 u1407 u1408 u1409 u1410 u1411 u1412 u1413 u1414 u1415 u1416 u1417 u1418 u1419
+ u1420 u1421 u1422 u1423 u1424 u1425 u1426 u1427 u1428 u1429 u1430 u1431 u1432 u1433 u1434 u1435 u1436 u1437 u1438 u1439
+ u1440 u1441 u1442 u1443 u1444 u1445 u1446 u1447 u1448 u1449 u1450 u1451 u1452 u1453 u1454 u1455 u1456 u1457 u1458 u1459
+ u1460 u1461 u1462 u1463 u1464 u1465 u1466 u1467 u1468 u1469 u1470 u1471 u1472 u1473 u1474 u1475 u1476 u1477 u1478 u1479
+ u1480 u1481 u1482 u1483 u1484 u1485 u1486 u1487 u1488 u1489 u1490 u1491 u1492 u1493 u1494 u1495 u1496 u1497 u1498 u1499
+ u1500 u1501 u1502 u1503 u1504 u1505 u1506 u1507 u1508 u1509 u1510 u1511 u1512 u1513 u1514 u1515 u1516 u1517 u1518 u1519
+ u1520 u1521 u1522 u1523 u1524 u1525 u1526 u1527 u1528 u1529 u1530 u1531 u1532 u1533 u1534 u1535 u1536 u1537 u1538 u1539
+ u1540 u1541 u1542 u1543 u1544 u1545 u1546 u1547 u1548 u1549 u1550 u1551 u1552 u1553 u1554 u1555 u1556 u1557 u1558 u1559
+ u1560 u1561 u1562 u1563 u1564 u1565 u1566 u1567 u1568 u1569 u1570 u1571 u1572 u1573 u1574 u1575 u1576 u1577 u1578 u1579
+ u1580 u1581 u1582 u1583 u1584 u1585 u1586 u1587 u1588 u1589 u1590 u1591 u1592 u1593 u1594 u1595 u1596 u1597 u1598 u1599
+ u1600 u1601 u1602 u1603 u1604 u1605 u1606 u1607 u1608 u1609 u1610 u1611 u1612 u1613 u1614 u1615 u1616 u1617 u1618 u1619
+ u1620 u1621 u1622 u1623 u1624 u1625 u1626 u1627 u1628 u1629 u1630 u1631 u1632 u1633 u1634 u1635 u1636 u1637 u1638 u1639
+ u1640 u1641 u1642 u1643 u1644 u1645 u1646 u1647 u1648 u1649 u1650 u1651 u1652 u1653 u1654 u1655 u1656 u1657 u1658 u1659
+ u1660 u1661 u1662 u1663 u1664 u1665 u1666 u1667 u1668 u1669 u1670 u1671 u1672 u1673 u1674 u1675 u1676 u1677 u1678 u1679
+ u1680 u1681 u1682 u1683 u1684 u1685 u1686 u1687 u1688 u1689 u1690 u1691 u1692 u1693 u1694 u1695 u1696 u1697 u1698 u1699
+ u1700 u1701 u1702 u1703 u1704 u1705 u1706 u1707 u1708 u1709 u1710 u1711 u1712 u1713 u1714 u1715 u1716 u1717 u1718 u1719
+ u1720 u1721 u1722 u1723 u1724 u1725 u1726 u1727 u1728 u1729 u1730 u1731 u1732 u1733 u1734 u1735 u1736 u1737 u1738 u1739
+ u1740 u1741 u1742 u1743 u1744 u1745 u1746 u1747 u1748 u1749 u1750 u1751 u1752 u1753 u1754 u1755 u1756 u1757 u1758 u1759
+ u1760 u1761 u1762 u1763 u1764 u1765 u1766 u1767 u1768 u1769 u1770 u1771 u1772 u1773 u1774 u1775 u1776 u1777 u1778 u1779
+ u1780 u1781 u1782 u1783 u1784 u1785 u1786 u1787 u1788 u1789 u1790 u1791 u1792 u1793 u1794 u1795 u1796 u1797 u1798 u1799
+ u1800 u1801 u1802 u1803 u1804 u1805 u1806 u1807 u1808 u1809 u1810 u1811 u1812 u1813 u1814 u1815 u1816 u1817 u1818 u1819
+ u1820 u1821 u1822 u1823 u1824 u1825 u1826 u1827 u1828 u1829 u1830 u1831 u1832 u1833 u1834 u1835 u1836 u1837 u1838 u1839
+ u1840 u1841 u1842 u1843 u1844 u1845 u1846 u1847 u1848 u1849 u1850 u1851 u1852 u1853 u1854 u1855 u1856 u1857 u1858 u1859
+ u1860 u1861 u1862 u1863 u1864 u1865 u1866 u1867 u1868 u1869 u1870 u1871 u1872 u1873 u1874 u1875 u1876 u1877 u1878 u1879
+ u1880 u1881 u1882 u1883 u1884 u1885 u1886 u1887 u1888 u1889 u1890 u1891 u1892 u1893 u1894 u1895 u1896 u1897 u1898 u1899
+ u1900 u1901 u1902 u1903 u1904 u1905 u1906 u1907 u1908 u1909 u1910 u1911 u1912 u1913 u1914 u1915 u1916 u1917 u1918 u1919
+ u1920 u1921 u1922 u1923 u1924 u1925 u1926 u1927 u1928 u1929 u1930 u1931 u1932 u1933 u1934 u1935 u1936 u1937 u1938 u1939
+ u1940 u1941 u1942 u1943 u1944 u1945 u1946 u1947 u1948 u1949 u1950 u1951 u1952 u1953 u1954 u1955 u1956 u1957 u1958 u1959
+ u1960 u1961 u1962 u1963 u1964 u1965 u1966 u1967 u1968 u1969 u1970 u1971 u1972 u1973 u1974 u1975 u1976 u1977 u1978 u1979
+ u1980 u1981 u1982 u1983 u1984 u1985 u1986 u1987 u1988 u1989 u1990 u1991 u1992 u1993 u1994 u1995 u1996 u1997 u1998 u1999
+ u2000 u2001 u2002 u2003 u2004 u2005 u2006 u2007 u2008 u2009 u2010 u2011 u2012 u2013 u2014 u2015 u2016 u2017 u2018 u2019
+ u2020 u2021 u2022 u2023 u2024 u2025 u2026 u2027 u2028 u2029 u2030 u2031 u2032 u2033 u2034 u2035 u2036 u2037 u2038 u2039
+ u2040 u2041 u2042 u2043 u2044 u2045 u2046 u2047 u2048 u2049 u2050 u2051 u2052 u2053 u2054 u2055 u2056 u2057 u2058 u2059
+ u2060 u2061 u2062 u2063 u2064 u2065 u2066 u2067 u2068 u2069 u2070 u2071 u2072 u2073 u2074 u2075 u2076 u2077 u2078 u2079
+ u2080 u2081 u2082 u2083 u2084 u2085 u2086 u2087 u2088 u2089 u2090 u2091 u2092 u2093 u2094 u2095 u2096 u2097 u2098 u2099
+ u2100 u2101 u2102 u2103 u2104 u2105 u2106 u2107 u2108 u2109 u2110 u2111 u2112 u2113 u2114 u2115 u2116 u2117 u2118 u2119
+ u2120 u2121 u2122 u2123 u2124 u2125 u2126 u2127 u2128 u2129 u2130 u2131 u2132 u2133 u2134 u2135 u2136 u2137 u2138 u2139
+ u2140 u2141 u2142 u2143 u2144 u2145 u2146 u2147 u2148 u2149 u2150 u2151 u2152 u2153 u2154 u2155 u2156 u2157 u2158 u2159
+ u2160 u2161 u2162 u2163 u2164 u2165 u2166 u2167 u2168 u2169 u2170 u2171 u2172 u2173 u2174 u2175 u2176 u2177 u2178 u2179
+ u2180 u2181 u2182 u2183 u2184 u2185 u2186 u2187 u2188 u2189 u2190 u2191 u2192 u2193 u2194 u2195 u2196 u2197 u2198 u2199
+ u2200 u2201 u2202 u2203 u2204 u2205 u2206 u2207 u2208 u2209 u2210 u2211 u2212 u2213 u2214 u2215 u2216 u2217 u2218 u2219
+ u2220 u2221 u2222 u2223 u2224 u2225 u2226 u2227 u2228 u2229 u2230 u2231 u2232 u2233 u2234 u2235 u2236 u2237 u2238 u2239
+ u2240 u2241 u2242 u2243 u2244 u2245 u2246 u2247 u2248 u2249 u2250 u2251 u2252 u2253 u2254 u2255 u2256 u2257 u2258 u2259
+ u2260 u2261 u2262 u2263 u2264 u2265 u2266 u2267 u2268 u2269 u2270 u2271 u2272 u2273 u2274 u2275 u2276 u2277 u2278 u2279
+ u2280 u2281 u2282 u2283 u2284 u2285 u2286 u2287 u2288 u2289 u2290 u2291 u2292 u2293 u2294 u2295 u2296 u2297 u2298 u2299
+ u2300 u2301 u2302 u2303 u2304 u2305 u2306 u2307 u2308 u2309 u2310 u2311 u2312 u2313 u2314 u2315 u2316 u2317 u2318 u2319
+ u2320 u2321 u2322 u2323 u2324 u2325 u2326 u2327 u2328 u2329 u2330 u2331 u2332 u2333 u2334 u2335 u2336 u2337 u2338 u2339
+ u2340 u2341 u2342 u2343 u2344 u2345 u2346 u2347 u2348 u2349 u2350 u2351 u2352 u2353 u2354 u2355 u2356 u2357 u2358 u2359
+ u2360 u2361 u2362 u2363 u2364 u2365 u2366 u2367 u2368 u2369 u2370 u2371 u2372 u2373 u2374 u2375 u2376 u2377 u2378 u2379
+ u2380 u2381 u2382 u2383 u2384 u2385 u2386 u2387 u2388 u2389 u2390 u2391 u2392 u2393 u2394 u2395 u2396 u2397 u2398 u2399
+ u2400 u2401 u2402 u2403 u2404 u2405 u2406 u2407 u2408 u2409 u2410 u2411 u2412 u2413 u2414 u2415 u2416 u2417 u2418 u2419
+ u2420 u2421 u2422 u2423 u2424 u2425 u2426 u2427 u2428 u2429 u2430 u2431 u2432 u2433 u2434 u2435 u2436 u2437 u2438 u2439
+ u2440 u2441 u2442 u2443 u2444 u2445 u2446 u2447 u2448 u2449 u2450 u2451 u2452 u2453 u2454 u2455 u2456 u2457 u2458 u2459
+ u2460 u2461 u2462 u2463 u2464 u2465 u2466 u2467 u2468 u2469 u2470 u2471 u2472 u2473 u2474 u2475 u2476 u2477 u2478 u2479
+ u2480 u2481 u2482 u2483 u2484 u2485 u2486 u2487 u2488 u2489 u2490 u2491 u2492 u2493 u2494 u2495 u2496 u2497 u2498 u2499))
+
+ (computation-test (list 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100)
+ 0)"#,
+ );
- assert_eq!(vm_execute(&program).err().unwrap(), CheckErrors::ExecutionTimeExpired.into());
+ assert_eq!(
+ vm_execute(&program).err().unwrap(),
+ CheckErrors::ExecutionTimeExpired.into()
+ );
 }

From 079be153f13848de3187bbd830bfc1598a177245 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Tue, 18 Feb 2025 16:33:52 +0100
Subject: [PATCH 023/238] added test for max execution time

---
 Cargo.lock | 1 +
 clarity/Cargo.toml | 1 +
 clarity/src/vm/mod.rs | 20 +-
 clarity/src/vm/tests/simple_apply_eval.rs | 623 +---------------------
 4 files changed, 27 insertions(+), 618 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a51010ecdf3..96ad10b6cb3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -634,6 +634,7 @@ dependencies = [
 "serde_derive",
 "serde_json",
 "serde_stacker",
+ "serial_test",
 "slog",
 "stacks-common",
 "time 0.2.27",
diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml
index 284e856e498..46a1c179e8f 100644
--- a/clarity/Cargo.toml
+++ b/clarity/Cargo.toml
@@ -47,6 +47,7 @@ mutants = "0.0.3"
 # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling
 # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now.
# criterion = "0.3" +serial_test = "3.2.0" [features] default = ["canonical"] diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 71e1527c5e3..f0ec3e84ed7 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -87,7 +87,11 @@ use crate::vm::types::{PrincipalData, TypeSignature}; pub use crate::vm::version::ClarityVersion; pub const MAX_CALL_STACK_DEPTH: usize = 64; -pub const MAX_EXECUTION_TIME_SECS: u64 = 10; +pub const MAX_EXECUTION_TIME_SECS: u64 = 30; + +#[cfg(test)] +static TEST_MAX_EXECUTION_TIME: std::sync::Mutex = + std::sync::Mutex::new(Duration::from_secs(MAX_EXECUTION_TIME_SECS)); #[derive(Debug, Clone)] pub struct ParsedContract { @@ -305,6 +309,16 @@ pub fn apply( } } +#[cfg(not(test))] +fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { + global_context.execution_time_tracker.elapsed() > Duration::from_secs(MAX_EXECUTION_TIME_SECS) +} + +#[cfg(test)] +fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { + global_context.execution_time_tracker.elapsed() > *TEST_MAX_EXECUTION_TIME.lock().unwrap() +} + pub fn eval( exp: &SymbolicExpression, env: &mut Environment, @@ -314,9 +328,7 @@ pub fn eval( Atom, AtomValue, Field, List, LiteralValue, TraitReference, }; - if env.global_context.execution_time_tracker.elapsed() - > Duration::from_secs(MAX_EXECUTION_TIME_SECS) - { + if check_max_execution_time_expired(env.global_context) { warn!( "ExecutionTime expired while running {:?} ({:?} elapsed)", exp, diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 0b3669f06b3..2390eb2d7e0 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -39,8 +39,12 @@ use crate::vm::types::{ use crate::vm::{ eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_parameters, CallStack, ClarityVersion, ContractContext, Environment, GlobalContext, LocalContext, Value, + MAX_EXECUTION_TIME_SECS, TEST_MAX_EXECUTION_TIME, }; +use serial_test::serial; +use std::time::Duration; + #[test] fn test_doubly_defined_persisted_vars() { let tests = [ @@ -1765,623 +1769,14 @@ fn test_chain_id() { } #[test] +#[serial] fn test_execution_time_expiration() { - let program = String::from( - r#";; Block Limits -;; { -;; "read_count": 15_000, -;; "read_length": 100_000_000, -;; "runtime": 5_000_000_000, -;; "write_count": 15_000, -;; "write_length": 15_000_000, -;; } - -(define-constant ERR_UNWRAP (err u101)) - -;; Variables -(define-data-var value-used-read-count uint u0) -(define-data-var temp-list (list 2000 uint) - (list )) - -(define-map test-map uint uint) - -;; ;; Functions -(define-private (initialize) - (begin - (var-set temp-list (list )) - (var-set value-used-read-count u0) - ) -) - -;; ;; Test read count limit -(define-private (read-count-one (current-number uint)) - (begin - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get 
value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) 
(var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get 
value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) 
(var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get 
value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) (var-get value-used-read-count) - (ok true) - ) -) - -(define-public (read-count-test (current-numbers (list 1000 uint))) - (begin - (initialize) - (ok (map read-count-one current-numbers)) - ) -) - - -;; ;; Test read length limit -(define-private (read-length-one (position uint)) - (begin - (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) - (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) - (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) - (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) - (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) (var-get value-used-read-length) - (ok true) - ) -) - -(define-public (read-length-test (current-numbers (list 1200 uint))) - (begin - (initialize) - (ok (map read-length-one current-numbers)) - ) -) - -;; Test write count limit -;; (define-private (write-count-one (current-number uint)) -;; (begin -;; (var-set value-used-read-count (+ (* current-number current-number) current-number)) -;; (var-set value-used-read-count (* u2 (var-get 
value-used-read-count))) -;; (var-set value-used-read-count u3) -;; ) -;; ) - -;; Test write count limit -(define-private (write-count-one (current-number uint)) - (begin - ;; Counts a write count as a read count as well - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; (var-set value-used-read-count u2) - ;; Counts a write count as a read count as well - ;; (map-set test-map current-number u1) - ;; (map-set test-map (+ current-number u1000) u2) - ;; (map-set test-map (+ current-number u2000) u3) - ;; (map-set test-map (+ current-number u3000) u4) - ;; (map-set test-map (+ current-number u4000) u5) - ;; (map-set test-map (+ current-number u5000) u6) - ;; (map-set test-map (+ current-number u6000) u7) - ;; (map-set test-map (+ current-number u7000) u8) - ;; (map-set test-map (+ current-number u8000) u9) - ;; (map-set test-map (+ current-number u9000) u10) - ;; (map-set test-map (+ current-number u10000) u11) - ;; (map-set test-map (+ current-number u11000) u12) - ;; (map-set test-map (+ current-number u12000) u13) - ;; (map-set test-map (+ current-number u13000) u14) - ;; (map-set test-map (+ current-number u14000) u15) - ;; Counts a write count as a read count as well - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (var-set temp-list (unwrap! (as-max-len? (append (list ) current-number) u2000) ERR_UNWRAP)) - (ok true) - ) -) - -(define-public (write-count-test (current-numbers (list 1000 uint))) - (begin - (initialize ) - (ok (map write-count-one current-numbers)) - ) -) - - -;; Test write length limit -(define-private (write-length-one (current-number uint)) - (begin - (var-set temp-list (unwrap! (as-max-len? 
(append (var-get temp-list) current-number) u2000) ERR_UNWRAP)) - (ok true) - ) -) - -(define-public (write-length-test (current-numbers (list 1000 uint))) - (begin - (initialize ) - ;; Chain multiple write operations - (map write-length-one current-numbers) - (map write-length-one current-numbers) - (map write-length-one current-numbers) - (map write-length-one current-numbers) - (map write-length-one current-numbers) - ;; Store final result - (var-set value-used-read-length (var-get temp-list)) - (ok true) - ) -) - -;; Test computation limit -(define-private (computation-one (x int) (y int)) - (begin - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* 
(/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ 
(* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) 
x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - (+ - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - (+ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* (/ (* x x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x) x)) - ) - - y - ) -) - -(define-private (computation-three (x int) (y int)) - (+ (computation-one x y) (computation-one x y) (computation-one x y)) -) - -(define-public (computation-test (l (list 1000 int)) (init int)) - (begin - (initialize ) - (ok (fold computation-three l init)) - ) -) - -;; List of values -(define-data-var value-used-read-length (list 5000 uint) - (list - u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11 u12 u13 u14 u15 
u16 u17 u18 u19 - u20 u21 u22 u23 u24 u25 u26 u27 u28 u29 u30 u31 u32 u33 u34 u35 u36 u37 u38 u39 - u40 u41 u42 u43 u44 u45 u46 u47 u48 u49 u50 u51 u52 u53 u54 u55 u56 u57 u58 u59 - u60 u61 u62 u63 u64 u65 u66 u67 u68 u69 u70 u71 u72 u73 u74 u75 u76 u77 u78 u79 - u80 u81 u82 u83 u84 u85 u86 u87 u88 u89 u90 u91 u92 u93 u94 u95 u96 u97 u98 u99 - u100 u101 u102 u103 u104 u105 u106 u107 u108 u109 u110 u111 u112 u113 u114 u115 u116 u117 u118 u119 - u120 u121 u122 u123 u124 u125 u126 u127 u128 u129 u130 u131 u132 u133 u134 u135 u136 u137 u138 u139 - u140 u141 u142 u143 u144 u145 u146 u147 u148 u149 u150 u151 u152 u153 u154 u155 u156 u157 u158 u159 - u160 u161 u162 u163 u164 u165 u166 u167 u168 u169 u170 u171 u172 u173 u174 u175 u176 u177 u178 u179 - u180 u181 u182 u183 u184 u185 u186 u187 u188 u189 u190 u191 u192 u193 u194 u195 u196 u197 u198 u199 - u200 u201 u202 u203 u204 u205 u206 u207 u208 u209 u210 u211 u212 u213 u214 u215 u216 u217 u218 u219 - u220 u221 u222 u223 u224 u225 u226 u227 u228 u229 u230 u231 u232 u233 u234 u235 u236 u237 u238 u239 - u240 u241 u242 u243 u244 u245 u246 u247 u248 u249 u250 u251 u252 u253 u254 u255 u256 u257 u258 u259 - u260 u261 u262 u263 u264 u265 u266 u267 u268 u269 u270 u271 u272 u273 u274 u275 u276 u277 u278 u279 - u280 u281 u282 u283 u284 u285 u286 u287 u288 u289 u290 u291 u292 u293 u294 u295 u296 u297 u298 u299 - u300 u301 u302 u303 u304 u305 u306 u307 u308 u309 u310 u311 u312 u313 u314 u315 u316 u317 u318 u319 - u320 u321 u322 u323 u324 u325 u326 u327 u328 u329 u330 u331 u332 u333 u334 u335 u336 u337 u338 u339 - u340 u341 u342 u343 u344 u345 u346 u347 u348 u349 u350 u351 u352 u353 u354 u355 u356 u357 u358 u359 - u360 u361 u362 u363 u364 u365 u366 u367 u368 u369 u370 u371 u372 u373 u374 u375 u376 u377 u378 u379 - u380 u381 u382 u383 u384 u385 u386 u387 u388 u389 u390 u391 u392 u393 u394 u395 u396 u397 u398 u399 - u400 u401 u402 u403 u404 u405 u406 u407 u408 u409 u410 u411 u412 u413 u414 u415 u416 u417 u418 u419 - u420 u421 u422 u423 u424 u425 u426 u427 u428 u429 u430 u431 u432 u433 u434 u435 u436 u437 u438 u439 - u440 u441 u442 u443 u444 u445 u446 u447 u448 u449 u450 u451 u452 u453 u454 u455 u456 u457 u458 u459 - u460 u461 u462 u463 u464 u465 u466 u467 u468 u469 u470 u471 u472 u473 u474 u475 u476 u477 u478 u479 - u480 u481 u482 u483 u484 u485 u486 u487 u488 u489 u490 u491 u492 u493 u494 u495 u496 u497 u498 u499 - u500 u501 u502 u503 u504 u505 u506 u507 u508 u509 u510 u511 u512 u513 u514 u515 u516 u517 u518 u519 - u520 u521 u522 u523 u524 u525 u526 u527 u528 u529 u530 u531 u532 u533 u534 u535 u536 u537 u538 u539 - u540 u541 u542 u543 u544 u545 u546 u547 u548 u549 u550 u551 u552 u553 u554 u555 u556 u557 u558 u559 - u560 u561 u562 u563 u564 u565 u566 u567 u568 u569 u570 u571 u572 u573 u574 u575 u576 u577 u578 u579 - u580 u581 u582 u583 u584 u585 u586 u587 u588 u589 u590 u591 u592 u593 u594 u595 u596 u597 u598 u599 - u600 u601 u602 u603 u604 u605 u606 u607 u608 u609 u610 u611 u612 u613 u614 u615 u616 u617 u618 u619 - u620 u621 u622 u623 u624 u625 u626 u627 u628 u629 u630 u631 u632 u633 u634 u635 u636 u637 u638 u639 - u640 u641 u642 u643 u644 u645 u646 u647 u648 u649 u650 u651 u652 u653 u654 u655 u656 u657 u658 u659 - u660 u661 u662 u663 u664 u665 u666 u667 u668 u669 u670 u671 u672 u673 u674 u675 u676 u677 u678 u679 - u680 u681 u682 u683 u684 u685 u686 u687 u688 u689 u690 u691 u692 u693 u694 u695 u696 u697 u698 u699 - u700 u701 u702 u703 u704 u705 u706 u707 u708 u709 u710 u711 u712 u713 u714 u715 u716 u717 u718 u719 - u720 u721 u722 u723 u724 u725 u726 u727 u728 
u729 u730 u731 u732 u733 u734 u735 u736 u737 u738 u739 - u740 u741 u742 u743 u744 u745 u746 u747 u748 u749 u750 u751 u752 u753 u754 u755 u756 u757 u758 u759 - u760 u761 u762 u763 u764 u765 u766 u767 u768 u769 u770 u771 u772 u773 u774 u775 u776 u777 u778 u779 - u780 u781 u782 u783 u784 u785 u786 u787 u788 u789 u790 u791 u792 u793 u794 u795 u796 u797 u798 u799 - u800 u801 u802 u803 u804 u805 u806 u807 u808 u809 u810 u811 u812 u813 u814 u815 u816 u817 u818 u819 - u820 u821 u822 u823 u824 u825 u826 u827 u828 u829 u830 u831 u832 u833 u834 u835 u836 u837 u838 u839 - u840 u841 u842 u843 u844 u845 u846 u847 u848 u849 u850 u851 u852 u853 u854 u855 u856 u857 u858 u859 - u860 u861 u862 u863 u864 u865 u866 u867 u868 u869 u870 u871 u872 u873 u874 u875 u876 u877 u878 u879 - u880 u881 u882 u883 u884 u885 u886 u887 u888 u889 u890 u891 u892 u893 u894 u895 u896 u897 u898 u899 - u900 u901 u902 u903 u904 u905 u906 u907 u908 u909 u910 u911 u912 u913 u914 u915 u916 u917 u918 u919 - u920 u921 u922 u923 u924 u925 u926 u927 u928 u929 u930 u931 u932 u933 u934 u935 u936 u937 u938 u939 - u940 u941 u942 u943 u944 u945 u946 u947 u948 u949 u950 u951 u952 u953 u954 u955 u956 u957 u958 u959 - u960 u961 u962 u963 u964 u965 u966 u967 u968 u969 u970 u971 u972 u973 u974 u975 u976 u977 u978 u979 - u980 u981 u982 u983 u984 u985 u986 u987 u988 u989 u990 u991 u992 u993 u994 u995 u996 u997 u998 u999 - u1000 u1001 u1002 u1003 u1004 u1005 u1006 u1007 u1008 u1009 u1010 u1011 u1012 u1013 u1014 u1015 u1016 u1017 u1018 u1019 - u1020 u1021 u1022 u1023 u1024 u1025 u1026 u1027 u1028 u1029 u1030 u1031 u1032 u1033 u1034 u1035 u1036 u1037 u1038 u1039 - u1040 u1041 u1042 u1043 u1044 u1045 u1046 u1047 u1048 u1049 u1050 u1051 u1052 u1053 u1054 u1055 u1056 u1057 u1058 u1059 - u1060 u1061 u1062 u1063 u1064 u1065 u1066 u1067 u1068 u1069 u1070 u1071 u1072 u1073 u1074 u1075 u1076 u1077 u1078 u1079 - u1080 u1081 u1082 u1083 u1084 u1085 u1086 u1087 u1088 u1089 u1090 u1091 u1092 u1093 u1094 u1095 u1096 u1097 u1098 u1099 - u1100 u1101 u1102 u1103 u1104 u1105 u1106 u1107 u1108 u1109 u1110 u1111 u1112 u1113 u1114 u1115 u1116 u1117 u1118 u1119 - u1120 u1121 u1122 u1123 u1124 u1125 u1126 u1127 u1128 u1129 u1130 u1131 u1132 u1133 u1134 u1135 u1136 u1137 u1138 u1139 - u1140 u1141 u1142 u1143 u1144 u1145 u1146 u1147 u1148 u1149 u1150 u1151 u1152 u1153 u1154 u1155 u1156 u1157 u1158 u1159 - u1160 u1161 u1162 u1163 u1164 u1165 u1166 u1167 u1168 u1169 u1170 u1171 u1172 u1173 u1174 u1175 u1176 u1177 u1178 u1179 - u1180 u1181 u1182 u1183 u1184 u1185 u1186 u1187 u1188 u1189 u1190 u1191 u1192 u1193 u1194 u1195 u1196 u1197 u1198 u1199 - u1200 u1201 u1202 u1203 u1204 u1205 u1206 u1207 u1208 u1209 u1210 u1211 u1212 u1213 u1214 u1215 u1216 u1217 u1218 u1219 - u1220 u1221 u1222 u1223 u1224 u1225 u1226 u1227 u1228 u1229 u1230 u1231 u1232 u1233 u1234 u1235 u1236 u1237 u1238 u1239 - u1240 u1241 u1242 u1243 u1244 u1245 u1246 u1247 u1248 u1249 u1250 u1251 u1252 u1253 u1254 u1255 u1256 u1257 u1258 u1259 - u1260 u1261 u1262 u1263 u1264 u1265 u1266 u1267 u1268 u1269 u1270 u1271 u1272 u1273 u1274 u1275 u1276 u1277 u1278 u1279 - u1280 u1281 u1282 u1283 u1284 u1285 u1286 u1287 u1288 u1289 u1290 u1291 u1292 u1293 u1294 u1295 u1296 u1297 u1298 u1299 - u1300 u1301 u1302 u1303 u1304 u1305 u1306 u1307 u1308 u1309 u1310 u1311 u1312 u1313 u1314 u1315 u1316 u1317 u1318 u1319 - u1320 u1321 u1322 u1323 u1324 u1325 u1326 u1327 u1328 u1329 u1330 u1331 u1332 u1333 u1334 u1335 u1336 u1337 u1338 u1339 - u1340 u1341 u1342 u1343 u1344 u1345 u1346 u1347 u1348 u1349 u1350 u1351 u1352 u1353 u1354 u1355 
u1356 u1357 u1358 u1359 - u1360 u1361 u1362 u1363 u1364 u1365 u1366 u1367 u1368 u1369 u1370 u1371 u1372 u1373 u1374 u1375 u1376 u1377 u1378 u1379 - u1380 u1381 u1382 u1383 u1384 u1385 u1386 u1387 u1388 u1389 u1390 u1391 u1392 u1393 u1394 u1395 u1396 u1397 u1398 u1399 - u1400 u1401 u1402 u1403 u1404 u1405 u1406 u1407 u1408 u1409 u1410 u1411 u1412 u1413 u1414 u1415 u1416 u1417 u1418 u1419 - u1420 u1421 u1422 u1423 u1424 u1425 u1426 u1427 u1428 u1429 u1430 u1431 u1432 u1433 u1434 u1435 u1436 u1437 u1438 u1439 - u1440 u1441 u1442 u1443 u1444 u1445 u1446 u1447 u1448 u1449 u1450 u1451 u1452 u1453 u1454 u1455 u1456 u1457 u1458 u1459 - u1460 u1461 u1462 u1463 u1464 u1465 u1466 u1467 u1468 u1469 u1470 u1471 u1472 u1473 u1474 u1475 u1476 u1477 u1478 u1479 - u1480 u1481 u1482 u1483 u1484 u1485 u1486 u1487 u1488 u1489 u1490 u1491 u1492 u1493 u1494 u1495 u1496 u1497 u1498 u1499 - u1500 u1501 u1502 u1503 u1504 u1505 u1506 u1507 u1508 u1509 u1510 u1511 u1512 u1513 u1514 u1515 u1516 u1517 u1518 u1519 - u1520 u1521 u1522 u1523 u1524 u1525 u1526 u1527 u1528 u1529 u1530 u1531 u1532 u1533 u1534 u1535 u1536 u1537 u1538 u1539 - u1540 u1541 u1542 u1543 u1544 u1545 u1546 u1547 u1548 u1549 u1550 u1551 u1552 u1553 u1554 u1555 u1556 u1557 u1558 u1559 - u1560 u1561 u1562 u1563 u1564 u1565 u1566 u1567 u1568 u1569 u1570 u1571 u1572 u1573 u1574 u1575 u1576 u1577 u1578 u1579 - u1580 u1581 u1582 u1583 u1584 u1585 u1586 u1587 u1588 u1589 u1590 u1591 u1592 u1593 u1594 u1595 u1596 u1597 u1598 u1599 - u1600 u1601 u1602 u1603 u1604 u1605 u1606 u1607 u1608 u1609 u1610 u1611 u1612 u1613 u1614 u1615 u1616 u1617 u1618 u1619 - u1620 u1621 u1622 u1623 u1624 u1625 u1626 u1627 u1628 u1629 u1630 u1631 u1632 u1633 u1634 u1635 u1636 u1637 u1638 u1639 - u1640 u1641 u1642 u1643 u1644 u1645 u1646 u1647 u1648 u1649 u1650 u1651 u1652 u1653 u1654 u1655 u1656 u1657 u1658 u1659 - u1660 u1661 u1662 u1663 u1664 u1665 u1666 u1667 u1668 u1669 u1670 u1671 u1672 u1673 u1674 u1675 u1676 u1677 u1678 u1679 - u1680 u1681 u1682 u1683 u1684 u1685 u1686 u1687 u1688 u1689 u1690 u1691 u1692 u1693 u1694 u1695 u1696 u1697 u1698 u1699 - u1700 u1701 u1702 u1703 u1704 u1705 u1706 u1707 u1708 u1709 u1710 u1711 u1712 u1713 u1714 u1715 u1716 u1717 u1718 u1719 - u1720 u1721 u1722 u1723 u1724 u1725 u1726 u1727 u1728 u1729 u1730 u1731 u1732 u1733 u1734 u1735 u1736 u1737 u1738 u1739 - u1740 u1741 u1742 u1743 u1744 u1745 u1746 u1747 u1748 u1749 u1750 u1751 u1752 u1753 u1754 u1755 u1756 u1757 u1758 u1759 - u1760 u1761 u1762 u1763 u1764 u1765 u1766 u1767 u1768 u1769 u1770 u1771 u1772 u1773 u1774 u1775 u1776 u1777 u1778 u1779 - u1780 u1781 u1782 u1783 u1784 u1785 u1786 u1787 u1788 u1789 u1790 u1791 u1792 u1793 u1794 u1795 u1796 u1797 u1798 u1799 - u1800 u1801 u1802 u1803 u1804 u1805 u1806 u1807 u1808 u1809 u1810 u1811 u1812 u1813 u1814 u1815 u1816 u1817 u1818 u1819 - u1820 u1821 u1822 u1823 u1824 u1825 u1826 u1827 u1828 u1829 u1830 u1831 u1832 u1833 u1834 u1835 u1836 u1837 u1838 u1839 - u1840 u1841 u1842 u1843 u1844 u1845 u1846 u1847 u1848 u1849 u1850 u1851 u1852 u1853 u1854 u1855 u1856 u1857 u1858 u1859 - u1860 u1861 u1862 u1863 u1864 u1865 u1866 u1867 u1868 u1869 u1870 u1871 u1872 u1873 u1874 u1875 u1876 u1877 u1878 u1879 - u1880 u1881 u1882 u1883 u1884 u1885 u1886 u1887 u1888 u1889 u1890 u1891 u1892 u1893 u1894 u1895 u1896 u1897 u1898 u1899 - u1900 u1901 u1902 u1903 u1904 u1905 u1906 u1907 u1908 u1909 u1910 u1911 u1912 u1913 u1914 u1915 u1916 u1917 u1918 u1919 - u1920 u1921 u1922 u1923 u1924 u1925 u1926 u1927 u1928 u1929 u1930 u1931 u1932 u1933 u1934 u1935 u1936 u1937 
u1938 u1939 - u1940 u1941 u1942 u1943 u1944 u1945 u1946 u1947 u1948 u1949 u1950 u1951 u1952 u1953 u1954 u1955 u1956 u1957 u1958 u1959 - u1960 u1961 u1962 u1963 u1964 u1965 u1966 u1967 u1968 u1969 u1970 u1971 u1972 u1973 u1974 u1975 u1976 u1977 u1978 u1979 - u1980 u1981 u1982 u1983 u1984 u1985 u1986 u1987 u1988 u1989 u1990 u1991 u1992 u1993 u1994 u1995 u1996 u1997 u1998 u1999 - u2000 u2001 u2002 u2003 u2004 u2005 u2006 u2007 u2008 u2009 u2010 u2011 u2012 u2013 u2014 u2015 u2016 u2017 u2018 u2019 - u2020 u2021 u2022 u2023 u2024 u2025 u2026 u2027 u2028 u2029 u2030 u2031 u2032 u2033 u2034 u2035 u2036 u2037 u2038 u2039 - u2040 u2041 u2042 u2043 u2044 u2045 u2046 u2047 u2048 u2049 u2050 u2051 u2052 u2053 u2054 u2055 u2056 u2057 u2058 u2059 - u2060 u2061 u2062 u2063 u2064 u2065 u2066 u2067 u2068 u2069 u2070 u2071 u2072 u2073 u2074 u2075 u2076 u2077 u2078 u2079 - u2080 u2081 u2082 u2083 u2084 u2085 u2086 u2087 u2088 u2089 u2090 u2091 u2092 u2093 u2094 u2095 u2096 u2097 u2098 u2099 - u2100 u2101 u2102 u2103 u2104 u2105 u2106 u2107 u2108 u2109 u2110 u2111 u2112 u2113 u2114 u2115 u2116 u2117 u2118 u2119 - u2120 u2121 u2122 u2123 u2124 u2125 u2126 u2127 u2128 u2129 u2130 u2131 u2132 u2133 u2134 u2135 u2136 u2137 u2138 u2139 - u2140 u2141 u2142 u2143 u2144 u2145 u2146 u2147 u2148 u2149 u2150 u2151 u2152 u2153 u2154 u2155 u2156 u2157 u2158 u2159 - u2160 u2161 u2162 u2163 u2164 u2165 u2166 u2167 u2168 u2169 u2170 u2171 u2172 u2173 u2174 u2175 u2176 u2177 u2178 u2179 - u2180 u2181 u2182 u2183 u2184 u2185 u2186 u2187 u2188 u2189 u2190 u2191 u2192 u2193 u2194 u2195 u2196 u2197 u2198 u2199 - u2200 u2201 u2202 u2203 u2204 u2205 u2206 u2207 u2208 u2209 u2210 u2211 u2212 u2213 u2214 u2215 u2216 u2217 u2218 u2219 - u2220 u2221 u2222 u2223 u2224 u2225 u2226 u2227 u2228 u2229 u2230 u2231 u2232 u2233 u2234 u2235 u2236 u2237 u2238 u2239 - u2240 u2241 u2242 u2243 u2244 u2245 u2246 u2247 u2248 u2249 u2250 u2251 u2252 u2253 u2254 u2255 u2256 u2257 u2258 u2259 - u2260 u2261 u2262 u2263 u2264 u2265 u2266 u2267 u2268 u2269 u2270 u2271 u2272 u2273 u2274 u2275 u2276 u2277 u2278 u2279 - u2280 u2281 u2282 u2283 u2284 u2285 u2286 u2287 u2288 u2289 u2290 u2291 u2292 u2293 u2294 u2295 u2296 u2297 u2298 u2299 - u2300 u2301 u2302 u2303 u2304 u2305 u2306 u2307 u2308 u2309 u2310 u2311 u2312 u2313 u2314 u2315 u2316 u2317 u2318 u2319 - u2320 u2321 u2322 u2323 u2324 u2325 u2326 u2327 u2328 u2329 u2330 u2331 u2332 u2333 u2334 u2335 u2336 u2337 u2338 u2339 - u2340 u2341 u2342 u2343 u2344 u2345 u2346 u2347 u2348 u2349 u2350 u2351 u2352 u2353 u2354 u2355 u2356 u2357 u2358 u2359 - u2360 u2361 u2362 u2363 u2364 u2365 u2366 u2367 u2368 u2369 u2370 u2371 u2372 u2373 u2374 u2375 u2376 u2377 u2378 u2379 - u2380 u2381 u2382 u2383 u2384 u2385 u2386 u2387 u2388 u2389 u2390 u2391 u2392 u2393 u2394 u2395 u2396 u2397 u2398 u2399 - u2400 u2401 u2402 u2403 u2404 u2405 u2406 u2407 u2408 u2409 u2410 u2411 u2412 u2413 u2414 u2415 u2416 u2417 u2418 u2419 - u2420 u2421 u2422 u2423 u2424 u2425 u2426 u2427 u2428 u2429 u2430 u2431 u2432 u2433 u2434 u2435 u2436 u2437 u2438 u2439 - u2440 u2441 u2442 u2443 u2444 u2445 u2446 u2447 u2448 u2449 u2450 u2451 u2452 u2453 u2454 u2455 u2456 u2457 u2458 u2459 - u2460 u2461 u2462 u2463 u2464 u2465 u2466 u2467 u2468 u2469 u2470 u2471 u2472 u2473 u2474 u2475 u2476 u2477 u2478 u2479 - u2480 u2481 u2482 u2483 u2484 u2485 u2486 u2487 u2488 u2489 u2490 u2491 u2492 u2493 u2494 u2495 u2496 u2497 u2498 u2499)) - - (computation-test (list 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100) - 0)"#, - ); + *TEST_MAX_EXECUTION_TIME.lock().unwrap() = Duration::from_secs(0); assert_eq!( - vm_execute(&program).err().unwrap(), + vm_execute("(+ 1 1)").err().unwrap(), CheckErrors::ExecutionTimeExpired.into() ); + + *TEST_MAX_EXECUTION_TIME.lock().unwrap() = Duration::from_secs(MAX_EXECUTION_TIME_SECS); } From bc74199697e6383762d343f22bcc4c5958e3cbb9 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 18 Feb 2025 16:34:12 +0100 Subject: [PATCH 024/238] fmt --- clarity/src/vm/tests/simple_apply_eval.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 2390eb2d7e0..cac6cd80316 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::time::Duration; + use rstest::rstest; use rstest_reuse::{self, *}; +use serial_test::serial; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; @@ -42,9 +45,6 @@ use crate::vm::{ MAX_EXECUTION_TIME_SECS, TEST_MAX_EXECUTION_TIME, }; -use serial_test::serial; -use std::time::Duration; - #[test] fn test_doubly_defined_persisted_vars() { let tests = [ From 6587090c7410cb07ff8c606f6add36d254779fcf Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Wed, 19 Feb 2025 16:01:27 +0200 Subject: [PATCH 025/238] chore: update used composite actions versions --- .github/workflows/clarity-js-sdk-pr.yml | 4 +- .github/workflows/clippy.yml | 4 +- .github/workflows/core-build-tests.yml | 4 +- .github/workflows/image-build-source.yml | 4 +- .github/workflows/stacks-core-tests.yml | 8 +- Stacks Core 3.0 OpenAPI.yaml | 868 +++++++++++++++++++++++ 6 files changed, 880 insertions(+), 12 deletions(-) create mode 100644 Stacks Core 3.0 OpenAPI.yaml diff --git a/.github/workflows/clarity-js-sdk-pr.yml b/.github/workflows/clarity-js-sdk-pr.yml index 6bcd555ca9f..e369f8a583b 100644 --- a/.github/workflows/clarity-js-sdk-pr.yml +++ b/.github/workflows/clarity-js-sdk-pr.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Checkout latest clarity js sdk id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: token: ${{ secrets.GH_TOKEN }} repository: ${{ env.CLARITY_JS_SDK_REPOSITORY }} @@ -46,7 +46,7 @@ jobs: - name: Create Pull Request id: create_pr - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 + uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f # v7.0.6 with: token: ${{ secrets.GH_TOKEN }} commit-message: "chore: update clarity-native-bin tag" diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 2279d42c882..048e9fdc036 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -22,13 +22,13 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Define Rust Toolchain id: define_rust_toolchain run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV - name: Setup Rust Toolchain id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 with: toolchain: ${{ env.RUST_TOOLCHAIN }} components: clippy diff --git a/.github/workflows/core-build-tests.yml b/.github/workflows/core-build-tests.yml index 393e2ff6b03..614f3f69c3d 100644 --- a/.github/workflows/core-build-tests.yml +++ b/.github/workflows/core-build-tests.yml @@ -12,13 +12,13 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@v3 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Define Rust Toolchain id: define_rust_toolchain run: echo "RUST_TOOLCHAIN=$(cat ./rust-toolchain)" >> $GITHUB_ENV - name: Setup Rust Toolchain id: setup_rust_toolchain - uses: actions-rust-lang/setup-rust-toolchain@v1 + uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1 with: toolchain: ${{ env.RUST_TOOLCHAIN }} - name: Build the binaries diff --git 
a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index e45455f05b6..1218348dfe5 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -49,7 +49,7 @@ jobs: ## Set docker metatdata - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 #v5.6.1 with: images: | ${{env.docker-org}}/${{ github.event.repository.name }} @@ -61,7 +61,7 @@ jobs: ## Build docker image - name: Build and Push ( ${{matrix.dist}} ) id: docker_build - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 with: file: ./.github/actions/dockerfiles/Dockerfile.${{matrix.dist}}-source platforms: ${{ env.docker_platforms }} diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 457a2aaefd5..05b9f09f627 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -78,7 +78,7 @@ jobs: ## checkout the code - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Run network relay tests id: nettest @@ -96,10 +96,10 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Execute core contract unit tests with clarinet-sdk id: clarinet_unit_test - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0 with: node-version: 18.x cache: "npm" @@ -125,7 +125,7 @@ jobs: steps: - name: Checkout the latest code id: git_checkout - uses: actions/checkout@44c2b7a8a4ea60a981eaca3cf939b5f4305c123b # v4.1.5 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Execute core contract unit tests in Clarinet id: clarinet_unit_test_v1 uses: docker://hirosystems/clarinet:1.7.1 diff --git a/Stacks Core 3.0 OpenAPI.yaml b/Stacks Core 3.0 OpenAPI.yaml new file mode 100644 index 00000000000..d82494ca363 --- /dev/null +++ b/Stacks Core 3.0 OpenAPI.yaml @@ -0,0 +1,868 @@ +openapi: 3.1.0 +servers: + - url: http://localhost:20443 + description: Local +info: + title: Stacks 3.0+ RPC API + version: '1.0.0' + description: | + This is the documentation for the `stacks-node` RPC interface. + license: + name: CC-0 + +paths: + /v2/transactions: + post: + summary: Broadcast raw transaction + tags: + - Transactions + description: Broadcast raw transactions on the network. You can use the [@stacks/transactions](https://github.com/blockstack/stacks.js) project to generate a raw transaction payload. 
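+      # A minimal usage sketch (hypothetical file name): serialize the signed
+      # transaction to its raw wire format, then POST the bytes directly as the
+      # request body, e.g.
+      #   curl -s -X POST http://localhost:20443/v2/transactions \
+      #     -H 'Content-Type: application/octet-stream' \
+      #     --data-binary @signed-tx.bin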
+      operationId: post_core_node_transactions
+      requestBody:
+        content:
+          application/octet-stream:
+            schema:
+              type: string
+              format: binary
+            example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000
+      responses:
+        "200":
+          description: Transaction ID of successful post of a raw tx to the node's mempool
+          content:
+            text/plain:
+              schema:
+                type: string
+                example: '"e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2"'
+        "400":
+          description: Rejections result in a 400 error
+          content:
+            application/json:
+              schema:
+                $ref: ./api/transaction/post-core-node-transactions-error.schema.json
+              example:
+                $ref: ./api/transaction/post-core-node-transactions-error.example.json
+
+  /v2/contracts/interface/{contract_address}/{contract_name}:
+    get:
+      summary: Get contract interface
+      description: Get contract interface using a `contract_address` and `contract_name`
+      tags:
+        - Smart Contracts
+      operationId: get_contract_interface
+      responses:
+        "200":
+          description: Contract interface
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-contract-interface.schema.json
+              example:
+                $ref: ./api/core-node/get-contract-interface.example.json
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+  /v2/map_entry/{contract_address}/{contract_name}/{map_name}:
+    post:
+      summary: Get specific data-map inside a contract
+      tags:
+        - Smart Contracts
+      operationId: get_contract_data_map_entry
+      description: |
+        Attempt to fetch data from a contract data map. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The map is identified with [Map Name].
+
+        The key to look up in the map is supplied via the POST body. This should be supplied as the hex string serialization of the key (which should be a Clarity value). Note that this is a JSON string atom.
+
+        In the response, `data` is the hex serialization of the map response. Note that map responses are Clarity option types: for non-existent values, this is a serialized none, and for all other responses, it is a serialized (some ...) object.
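+      # Illustrative lookup (hypothetical contract and map names), assuming the
+      # map's key type is uint; the hex below is the consensus serialization of
+      # u1, a 0x01 type tag followed by a 16-byte big-endian integer:
+      #   curl -s -X POST \
+      #     http://localhost:20443/v2/map_entry/SP000000000000000000002Q6VF78/some-contract/some-map \
+      #     -H 'Content-Type: application/json' \
+      #     -d '"0x0100000000000000000000000000000001"'
+      # The `data` field of the response is a serialized option: `0x09` (none) if
+      # the entry is absent, or `0x0a` followed by the value for (some ...).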
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-contract-data-map-entry.schema.json
+              example:
+                $ref: ./api/core-node/get-contract-data-map-entry.example.json
+        "400":
+          description: Failed loading data map
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: map_name
+          in: path
+          required: true
+          description: Map name
+          schema:
+            type: string
+        - name: proof
+          in: query
+          description: Returns object without the proof field when set to 0
+          schema:
+            type: integer
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+      x-codegen-request-body-name: key
+      requestBody:
+        description: Hex string serialization of the lookup key (which should be a Clarity value)
+        required: true
+        content:
+          application/json:
+            schema:
+              type: string
+
+  /v2/contracts/source/{contract_address}/{contract_name}:
+    get:
+      summary: Get contract source
+      tags:
+        - Smart Contracts
+      operationId: get_contract_source
+      description: Returns the Clarity source code of a given contract, along with the block height it was published in, and the MARF proof for the data
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-contract-source.schema.json
+              example:
+                $ref: ./api/core-node/get-contract-source.example.json
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: proof
+          in: query
+          description: Returns object without the proof field if set to 0
+          schema:
+            type: integer
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+          required: false
+
+  /v2/contracts/call-read/{contract_address}/{contract_name}/{function_name}:
+    post:
+      summary: Call read-only function
+      tags:
+        - Smart Contracts
+      operationId: call_read_only_function
+      description: |
+        Call a read-only public function on a given smart contract.
+
+        The smart contract and function are specified using the URL path. The arguments and the simulated tx-sender are supplied via the POST body as a JSON object (see the request body below).
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/contract/post-call-read-only-fn.schema.json
+              examples:
+                success:
+                  $ref: ./api/contract/post-call-read-only-fn-success.example.json
+                fail:
+                  $ref: ./api/contract/post-call-read-only-fn-fail.example.json
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: function_name
+          in: path
+          required: true
+          description: Function name
+          schema:
+            type: string
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+          required: false
+      requestBody:
+        description: map of arguments and the simulated tx-sender where sender is either a Contract identifier or a normal Stacks address, and arguments is an array of hex serialized Clarity values.
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: './entities/contracts/read-only-function-args.schema.json'
+            example:
+              sender: 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0.get-info'
+              arguments:
+                - '0x0011...'
+                - '0x00231...'
+
+  /v2/accounts/{principal}:
+    get:
+      summary: Get account info
+      tags:
+        - Accounts
+      operationId: get_account_info
+      description: |
+        Get the account data for the provided principal
+
+        Where balance is the hex encoding of an unsigned 128-bit integer (big-endian), nonce is an unsigned 64-bit integer, and the proofs are provided as hex strings.
+
+        For non-existent accounts, this does not 404; rather, it returns an object with balance and nonce of 0.
+      parameters:
+        - name: principal
+          in: path
+          description: Stacks address or a Contract identifier (e.g. `SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0.get-info`)
+          required: true
+          schema:
+            type: string
+        - name: proof
+          in: query
+          description: Returns object without the proof field if set to 0
+          schema:
+            type: integer
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-account-data.schema.json
+              example:
+                $ref: ./api/core-node/get-account-data.example.json
+
+  /v2/fees/transaction:
+    post:
+      summary: Get approximate fees for the given transaction
+      tags:
+        - Fees
+      description: |
+        Get an estimated fee for the supplied transaction. This
+        estimates the execution cost of the transaction, the current
+        fee rate of the network, and returns estimates for fee
+        amounts.
+
+        * `transaction_payload` is a hex-encoded serialization of
+          the TransactionPayload for the transaction.
+        * `estimated_len` is an optional argument that provides the
+          endpoint with an estimation of the final length (in bytes)
+          of the transaction, including any post-conditions and
+          signatures.
+
+        If the node cannot provide an estimate for the transaction
+        (e.g., if the node has never seen a contract-call for the
+        given contract and function) or if estimation is not
+        configured on this node, a 400 response is returned.
+        The 400 response will be a JSON error containing a `reason`
+        field which can be one of the following:
+
+        * `DatabaseError` - this Stacks node has had an internal
+          database error while trying to estimate the costs of the
+          supplied transaction.
+        * `NoEstimateAvailable` - this Stacks node has not seen this
+          kind of contract-call before, and it cannot provide an
+          estimate yet.
+        * `CostEstimationDisabled` - this Stacks node does not perform
+          fee or cost estimation, and it cannot respond on this
+          endpoint.
+
+        The 200 response contains the following data:
+
+        * `estimated_cost` - the estimated multi-dimensional cost of
+          executing the Clarity VM on the provided transaction.
+        * `estimated_cost_scalar` - a unitless integer that the Stacks
+          node uses to compare how much of the block limit is consumed
+          by different transactions. This value incorporates the
+          estimated length of the transaction and the estimated
+          execution cost of the transaction. The range of this integer
+          may vary between different Stacks nodes. In order to compute
+          an estimate of total fee amount for the transaction, this
+          value is multiplied by the same Stacks node's estimated fee
+          rate.
+        * `cost_scalar_change_by_byte` - a float value that indicates how
+          much the `estimated_cost_scalar` value would increase for every
+          additional byte in the final transaction.
+        * `estimations` - an array of estimated fee rates and total fees to
+          pay in microSTX for the transaction. This array provides a range of
+          estimates (default: 3) that may be used. Each element of the array
+          contains the following fields:
+            * `fee_rate` - the estimated value for the current fee
+              rates in the network
+            * `fee` - the estimated value for the total fee in
+              microSTX that the given transaction should pay. These
+              values are the result of computing:
+              `fee_rate` x `estimated_cost_scalar`.
+              If the estimated fees are less than the minimum relay
+              fee `(1 ustx x estimated_len)`, then that minimum relay
+              fee will be returned here instead.
+
+
+        Note: If the final transaction's byte size is larger than
+        supplied to `estimated_len`, then applications should increase
+        this fee amount by:
+
+        `fee_rate` x `cost_scalar_change_by_byte` x (`final_size` - `estimated_size`)
+
+      operationId: post_fee_transaction
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: ./api/core-node/post-fee-transaction.schema.json
+            example:
+              $ref: ./api/core-node/post-fee-transaction.example.json
+      responses:
+        "200":
+          description: Estimated fees for the transaction
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/post-fee-transaction-response.schema.json
+              example:
+                $ref: ./api/core-node/post-fee-transaction-response.example.json
+
+  /v2/fees/transfer:
+    get:
+      summary: Get estimated fee
+      tags:
+        - Fees
+      operationId: get_fee_transfer
+      description: Get an estimated fee rate for STX transfer transactions. This is a fee rate per byte, and is returned as a JSON integer
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-fee-transfer.schema.json
+              example:
+                $ref: ./api/core-node/get-fee-transfer.example.json
+
+  /v2/info:
+    get:
+      summary: Get Core API info
+      description: Get Core API information
+      tags:
+        - Info
+      operationId: get_core_api_info
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-info.schema.json
+              example:
+                $ref: ./api/core-node/get-info.example.json
+
+  /v2/pox:
+    get:
+      summary: Get PoX details
+      description: Get Proof of Transfer (PoX) information. Can be used for Stacking.
+      tags:
+        - Info
+      operationId: get_pox_info
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-pox.schema.json
+              example:
+                $ref: ./api/core-node/get-pox.example.json
+      parameters:
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+
+  /v2/traits/{contract_address}/{contract_name}/{trait_contract_address}/{trait_contract_name}/{trait_name}:
+    get:
+      summary: Get trait implementation details
+      description: Determine whether or not a specified trait is implemented (either explicitly or implicitly) within a given contract.
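+      # Worked example for the /v2/fees/transaction estimates above, using
+      # hypothetical numbers: with estimated_cost_scalar = 25 and an estimations
+      # entry of fee_rate = 30, the estimated fee is 30 x 25 = 750 microSTX. If
+      # the final transaction ends up 20 bytes longer than estimated_len and
+      # cost_scalar_change_by_byte = 0.5, add about 30 x 0.5 x 20 = 300 microSTX.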
+      tags:
+        - Smart Contracts
+      operationId: get_is_trait_implemented
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/trait/get-is-trait-implemented.schema.json
+              example:
+                $ref: ./api/trait/get-is-trait-implemented.example.json
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: trait_contract_address
+          in: path
+          required: true
+          description: Trait Stacks address
+          schema:
+            type: string
+        - name: trait_contract_name
+          in: path
+          required: true
+          description: Trait contract name
+          schema:
+            type: string
+        - name: trait_name
+          in: path
+          required: true
+          description: Trait name
+          schema:
+            type: string
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: |
+            The Stacks chain tip to query from.
+            If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state).
+            If the tip is left unspecified, the Stacks chain tip will be selected (only includes confirmed state).
+
+  /v2/clarity/marf/{clarity_marf_key}:
+    post:
+      summary: Get the MARF value for a given key
+      tags:
+        - Smart Contracts
+      operationId: get_clarity_marf_value
+      description: |
+        Attempt to fetch the value of a MARF key.
+
+        In the response, `data` is the hex serialization of the value.
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-clarity-marf-value.schema.json
+              example:
+                $ref: ./api/core-node/get-clarity-marf-value.example.json
+        "400":
+          description: Failed to retrieve MARF key
+      parameters:
+        - name: clarity_marf_key
+          in: path
+          required: true
+          description: MARF key
+          schema:
+            type: string
+        - name: proof
+          in: query
+          description: Returns object without the proof field when set to 0
+          schema:
+            type: integer
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
+
+  /v2/clarity/metadata/{contract_address}/{contract_name}/{clarity_metadata_key}:
+    post:
+      summary: Get the contract metadata for the metadata key
+      tags:
+        - Smart Contracts
+      operationId: get_clarity_metadata_key
+      description: |
+        Attempt to fetch the metadata of a contract. The contract is identified with [Contract Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key].
+
+        In the response, `data` is formatted as JSON.
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: ./api/core-node/get-clarity-metadata.schema.json
+              example:
+                $ref: ./api/core-node/get-clarity-metadata.example.json
+        "400":
+          description: Failed to retrieve metadata from contract
+      parameters:
+        - name: contract_address
+          in: path
+          required: true
+          description: Stacks address
+          schema:
+            type: string
+        - name: contract_name
+          in: path
+          required: true
+          description: Contract name
+          schema:
+            type: string
+        - name: clarity_metadata_key
+          in: path
+          required: true
+          description: Metadata key
+          schema:
+            type: string
+        - name: tip
+          in: query
+          schema:
+            type: string
+          description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest
+            known tip (includes unconfirmed state).
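+
+  # Illustrative response shape for the constant-value endpoint below, assuming
+  # a hypothetical contract that defines (define-constant is-enabled true):
+  # `data` holds the hex serialization of the Clarity value, e.g.
+  # {"data": "0x03"}, since a bool true serializes to the single type-tag
+  # byte 0x03.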
+ + /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: + post: + summary: Get the value of a constant inside a contract + tags: + - Smart Contracts + operationId: get_constant_val + description: | + Attempt to fetch the value of a constant inside a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The constant is identified with [Constant Name]. + + In the response, `data` is the hex serialization of the constant value. + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: ./api/core-node/get-constant-val.schema.json + example: + $ref: ./api/core-node/get-constant-val.example.json + "400": + description: Failed to retrieve constant value from contract + parameters: + - name: contract_address + in: path + required: true + description: Stacks address + schema: + type: string + - name: contract_name + in: path + required: true + description: Contract name + schema: + type: string + - name: constant_name + in: path + required: true + description: Constant name + schema: + type: string + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest + known tip (includes unconfirmed state). + + /v3/block_proposal: + post: + summary: Validate a proposed Stacks block + tags: + - Mining + operationId: post_block_proposal + description: | + Used by stackers to validate a proposed Stacks block from a miner. + + **This API endpoint requires a basic Authorization header.** + responses: + "202": + description: Block proposal has been accepted for processing. + The result will be returned via the event observer. + content: + application/json: + example: + $ref: ./api/core-node/post-block-proposal-response.example.json + "400": + description: Endpoint not enabled. + "401": + description: Unauthorized. + "429": + description: There is an ongoing proposal validation being processed, + the new request cannot be accepted until the prior request has been processed. + content: + application/json: + example: + $ref: ./api/core-node/post-block-proposal-response.429.json + requestBody: + content: + application/json: + example: + $ref: ./api/core-node/post-block-proposal-req.example.json + + /v3/stacker_set/{cycle_number}: + get: + summary: Fetch the stacker and signer set information for a given cycle. + tags: + - Mining + operationId: get_stacker_set + description: | + Used to get stacker and signer set information for a given cycle. + + This will only return information for cycles started in Epoch-2.5 where PoX-4 was active and subsequent cycles. + parameters: + - name: cycle_number + in: path + required: true + description: reward cycle number + schema: + type: integer + responses: + "200": + description: Information for the given reward cycle + content: + application/json: + example: + $ref: ./api/core-node/get_stacker_set.example.json + "400": + description: Could not fetch the given reward set + content: + application/json: + example: + $ref: ./api/core-node/get_stacker_set.400.example.json + + /v3/blocks/{block_id}: + get: + summary: Fetch a Nakamoto block + tags: + - Blocks + operationId: get_block_v3 + description: + Fetch a Nakamoto block by its index block hash. + parameters: + - name: block_id + in: path + description: The block's ID hash + required: true + schema: + type: string + responses: + "200": + description: The raw SIP-003-encoded block will be returned. 
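The Nakamoto block endpoints in this area return raw SIP-003 bytes rather than JSON, so fetching them looks slightly different from the JSON endpoints. A hedged sketch follows; the block ID is a placeholder and `reqwest` with the blocking feature is again assumed.

```rust
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let block_id = "placeholder-index-block-hash"; // hypothetical ID hash
    let url = format!("http://localhost:20443/v3/blocks/{block_id}");
    let resp = reqwest::blocking::get(&url)?;
    if resp.status() == reqwest::StatusCode::NOT_FOUND {
        eprintln!("block not found");
        return Ok(());
    }
    // A 200 body is the raw SIP-003-encoded block (application/octet-stream).
    let bytes = resp.error_for_status()?.bytes()?;
    println!("fetched {} bytes", bytes.len());
    Ok(())
}
```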
+ content: + application/octet-stream: + schema: + type: string + format: binary + "404": + description: The block could not be found + content: + application/text-plain: {} + + /v3/blocks/height/{block_height}: + get: + summary: Fetch a Nakamoto block by its height and optional tip + tags: + - Blocks + operationId: get_block_v3_by_height + description: + Fetch a Nakamoto block by its height and optional tip. + parameters: + - name: block_height + in: path + description: The block's height + required: true + schema: + type: integer + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest or empty, the query will be run + from the latest known tip. + responses: + "200": + description: The raw SIP-003-encoded block will be returned. + content: + application/octet-stream: + schema: + type: string + format: binary + "404": + description: The block could not be found + content: + application/text-plain: {} + + /v3/tenures/info: + get: + summary: Fetch metadata about the ongoing Nakamoto tenure + tags: + - Blocks + operationId: get_tenure_info + description: + Fetch metadata about the ongoing Nakamoto tenure. This information is sufficient to obtain and authenticate the highest complete tenure, as well as obtain new tenure blocks. + responses: + "200": + description: Metadata about the ongoing tenure + content: + application/json: + example: + $ref: ./api/core-node/get_tenure_info.json + + /v3/tenures/{block_id}: + get: + summary: Fetch a sequence of Nakamoto blocks in a tenure + tags: + - Blocks + operationId: get_tenures + description: + Fetch a sequence of Nakamoto blocks in a tenure. The blocks will be served in order from highest to lowest. The blocks will be encoded in their SIP-003 wire format, and concatenated together. + responses: + "200": + description: SIP-003-encoded Nakamoto blocks, concatenated together + content: + application/octet-stream: + schema: + type: string + format: binary + parameters: + - name: block_id + in: path + description: + The tenure-start block ID of the tenure to query + required: true + schema: + type: string + - name: stop + in: query + description: + The block ID hash of the highest block in this tenure that is already known to the caller. Neither the corresponding block nor any of its ancestors will be served. This is used to fetch tenure blocks that the caller does not have. + required: false + schema: + type: string + + /v3/sortitions/{lookup_kind}/{lookup}: + get: + summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). + tags: + - Blocks + operationId: get_sortitions + description: + Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. + responses: + "200": + description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks + content: + application/json: + examples: + Latest: + description: A single element list is returned when just one sortition is requested + value: + $ref: ./api/core-node/get_sortitions.example.json + LatestAndLast: + description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. + value: + $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json + parameters: + - name: lookup_kind + in: path + description: |- + The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. 
+ Otherwise, the `lookup_kind` should be one of the following strings: + * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. + * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. + * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. + * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block + required: false + schema: + type: string + - name: lookup + in: path + description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` + required: false + schema: + type: string + /v3/signer/{signer}/{cycle_number}: + get: + summary: Get number of blocks signed by signer during a given reward cycle + tags: + - Blocks + - Signers + operationId: get_signer + description: Get number of blocks signed by signer during a given reward cycle + parameters: + - name: signer + in: path + required: true + description: Hex-encoded compressed Secp256k1 public key of signer + schema: + type: string + - name: cycle_number + in: path + required: true + description: Reward cycle number + schema: + type: integer + responses: + 200: + description: Number of blocks signed + content: + text/plain: + schema: + type: integer + example: 7 From dbad2550850669ba8c4fac3b71a8a6ab947c3971 Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Fri, 21 Feb 2025 10:04:57 +0000 Subject: [PATCH 026/238] Bump version --- versions.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/versions.toml b/versions.toml index a701fa3a70f..1d54eb4fc10 100644 --- a/versions.toml +++ b/versions.toml @@ -1,4 +1,4 @@ # Update these values when a new release is created. # `stacks-common/build.rs` will automatically update `versions.rs` with these values. 
-stacks_node_version = "3.1.0.0.5" -stacks_signer_version = "3.1.0.0.5.0" +stacks_node_version = "3.1.0.0.6" +stacks_signer_version = "3.1.0.0.6.0" From ef1af19988700c914c32c11b2d7fb3c28d041f34 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Feb 2025 16:42:16 -0500 Subject: [PATCH 027/238] chore: update VSCode launch.json --- .vscode/launch.json | 115 ++++++++++++++++---------------------------- 1 file changed, 42 insertions(+), 73 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 64a883de0ad..f645a6c87db 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,14 +4,19 @@ { "type": "lldb", "request": "launch", - "name": "executable 'blockstack-core'", + "name": "executable 'stacks-node'", "cargo": { - "args": ["build", "--bin=stacks-node"], + "args": [ + "build", + "--bin=stacks-node" + ], "filter": { "kind": "bin" } }, - "args": ["mockamoto"], + "args": [ + "mockamoto" + ], "cwd": "${workspaceFolder}" }, { @@ -19,7 +24,11 @@ "request": "launch", "name": "executable 'clarity-cli'", "cargo": { - "args": ["build", "--bin=clarity-cli", "--package=blockstack-core"], + "args": [ + "build", + "--bin=clarity-cli", + "--package=stackslib" + ], "filter": { "name": "clarity-cli", "kind": "bin" @@ -33,13 +42,19 @@ "request": "launch", "name": "executable 'blockstack-cli'", "cargo": { - "args": ["build", "--bin=blockstack-cli", "--package=blockstack-core"], + "args": [ + "build", + "--bin=blockstack-cli", + "--package=stackslib" + ], "filter": { "name": "blockstack-cli", "kind": "bin" } }, - "args": ["generate-sk"], + "args": [ + "generate-sk" + ], "cwd": "${workspaceFolder}" }, { @@ -47,23 +62,34 @@ "request": "launch", "name": "executable 'stacks-node' -- mocknet", "cargo": { - "args": ["build", "--bin=stacks-node", "--package=stacks-node"], + "args": [ + "build", + "--bin=stacks-node", + "--package=stacks-node" + ], "filter": { "name": "stacks-node", "kind": "bin" } }, - "args": ["mocknet"], + "args": [ + "mocknet" + ], "cwd": "${workspaceFolder}" }, { "type": "lldb", "request": "launch", - "name": "unit tests in library 'blockstack_lib'", + "name": "unit tests in library 'stackslib'", "cargo": { - "args": ["test", "--no-run", "--lib", "--package=blockstack-core"], + "args": [ + "test", + "--no-run", + "--lib", + "--package=stackslib" + ], "filter": { - "name": "blockstack_lib", + "name": "stackslib", "kind": "lib" } }, @@ -73,13 +99,13 @@ { "type": "lldb", "request": "launch", - "name": "unit tests in executable 'blockstack-core'", + "name": "unit tests in executable 'stacks-inspect'", "cargo": { "args": [ "test", "--no-run", "--bin=stacks-inspect", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "stacks-inspect", @@ -98,7 +124,7 @@ "test", "--no-run", "--bin=clarity-cli", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "clarity-cli", @@ -117,7 +143,7 @@ "test", "--no-run", "--bin=blockstack-cli", - "--package=blockstack-core" + "--package=stackslib" ], "filter": { "name": "blockstack-cli", @@ -145,63 +171,6 @@ }, "args": [], "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'marf_bench'", - "cargo": { - "args": [ - "test", - "--no-run", - "--bench=marf_bench", - "--package=blockstack-core" - ], - "filter": { - "name": "marf_bench", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'large_contract_bench'", - "cargo": { - "args": [ - "test", - "--no-run", - 
"--bench=large_contract_bench", - "--package=blockstack-core" - ], - "filter": { - "name": "large_contract_bench", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" - }, - { - "type": "lldb", - "request": "launch", - "name": "benchmark 'block_limits'", - "cargo": { - "args": [ - "test", - "--no-run", - "--bench=block_limits", - "--package=blockstack-core" - ], - "filter": { - "name": "block_limits", - "kind": "bench" - } - }, - "args": [], - "cwd": "${workspaceFolder}" } ] -} +} \ No newline at end of file From 0217f847ac8823eb3d59f0a19bf91881864f6ebf Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Feb 2025 16:01:13 +0100 Subject: [PATCH 028/238] added TestFlag usage --- Cargo.lock | 1 + clarity/Cargo.toml | 1 + clarity/src/vm/mod.rs | 10 +++++++--- clarity/src/vm/tests/simple_apply_eval.rs | 4 ++-- stacks-common/src/util/tests.rs | 6 ++++++ 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96ad10b6cb3..0cb2ebfc1e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -637,6 +637,7 @@ dependencies = [ "serial_test", "slog", "stacks-common", + "stackslib", "time 0.2.27", ] diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 46a1c179e8f..ec515ce9628 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -47,6 +47,7 @@ mutants = "0.0.3" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. # criterion = "0.3" +stacks = { package = "stackslib", path = "../stackslib", features = ["default", "testing"] } serial_test = "3.2.0" [features] diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index f0ec3e84ed7..8cecaae84a7 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -54,9 +54,13 @@ pub mod test_util; pub mod clarity; use std::collections::BTreeMap; +#[cfg(test)] +use std::sync::LazyLock; use std::time::Duration; use serde_json; +#[cfg(test)] +use stacks::util::tests::TestFlag; use stacks_common::types::StacksEpochId; use self::analysis::ContractAnalysis; @@ -90,8 +94,8 @@ pub const MAX_CALL_STACK_DEPTH: usize = 64; pub const MAX_EXECUTION_TIME_SECS: u64 = 30; #[cfg(test)] -static TEST_MAX_EXECUTION_TIME: std::sync::Mutex = - std::sync::Mutex::new(Duration::from_secs(MAX_EXECUTION_TIME_SECS)); +static TEST_MAX_EXECUTION_TIME: LazyLock> = + LazyLock::new(|| TestFlag::new(Duration::from_secs(MAX_EXECUTION_TIME_SECS))); #[derive(Debug, Clone)] pub struct ParsedContract { @@ -316,7 +320,7 @@ fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { #[cfg(test)] fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { - global_context.execution_time_tracker.elapsed() > *TEST_MAX_EXECUTION_TIME.lock().unwrap() + global_context.execution_time_tracker.elapsed() > TEST_MAX_EXECUTION_TIME.get() } pub fn eval( diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index cac6cd80316..e31874d3932 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -1771,12 +1771,12 @@ fn test_chain_id() { #[test] #[serial] fn test_execution_time_expiration() { - *TEST_MAX_EXECUTION_TIME.lock().unwrap() = Duration::from_secs(0); + TEST_MAX_EXECUTION_TIME.set(Duration::from_secs(0)); assert_eq!( vm_execute("(+ 1 1)").err().unwrap(), CheckErrors::ExecutionTimeExpired.into() ); - *TEST_MAX_EXECUTION_TIME.lock().unwrap() = Duration::from_secs(MAX_EXECUTION_TIME_SECS); + 
TEST_MAX_EXECUTION_TIME.set(Duration::from_secs(MAX_EXECUTION_TIME_SECS)); } diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs index 1b01a449be1..c941e444bbe 100644 --- a/stacks-common/src/util/tests.rs +++ b/stacks-common/src/util/tests.rs @@ -53,6 +53,12 @@ impl Default for TestFlag { } } +impl TestFlag { + pub fn new(initial_value: T) -> Self { + Self(Arc::new(Mutex::new(Some(initial_value)))) + } +} + impl TestFlag { /// Sets the value of the test flag. /// From f321a2c4558f0e391a7e8adeafd1418593c80619 Mon Sep 17 00:00:00 2001 From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com> Date: Tue, 25 Feb 2025 22:34:26 +0200 Subject: [PATCH 029/238] fix: remove leftover openapi yml file --- Stacks Core 3.0 OpenAPI.yaml | 868 ----------------------------------- 1 file changed, 868 deletions(-) delete mode 100644 Stacks Core 3.0 OpenAPI.yaml diff --git a/Stacks Core 3.0 OpenAPI.yaml b/Stacks Core 3.0 OpenAPI.yaml deleted file mode 100644 index d82494ca363..00000000000 --- a/Stacks Core 3.0 OpenAPI.yaml +++ /dev/null @@ -1,868 +0,0 @@ -openapi: 3.1.0 -servers: - - url: http://localhost:20443 - description: Local -info: - title: Stacks 3.0+ RPC API - version: '1.0.0' - description: | - This is the documentation for the `stacks-node` RPC interface. - license: - name: CC-0 - -paths: - /v2/transactions: - post: - summary: Broadcast raw transaction - tags: - - Transactions - description: Broadcast raw transactions on the network. You can use the [@stacks/transactions](https://github.com/blockstack/stacks.js) project to generate a raw transaction payload. - operationId: post_core_node_transactions - requestBody: - content: - application/octet-stream: - schema: - type: string - format: binary - example: binary format of 00000000010400bed38c2aadffa348931bcb542880ff79d607afec000000000000000000000000000000c800012b0b1fff6cccd0974966dcd665835838f0985be508e1322e09fb3d751eca132c492bda720f9ef1768d14fdabed6127560ba52d5e3ac470dcb60b784e97dc88c9030200000000000516df0ba3e79792be7be5e50a370289accfc8c9e032000000000000303974657374206d656d6f00000000000000000000000000000000000000000000000000 - responses: - "200": - description: Transaction ID of successful post of a raw tx to the node's mempool - content: - text/plain: - schema: - type: string - example: '"e161978626f216b2141b156ade10501207ae535fa365a13ef5d7a7c9310a09f2"' - "400": - description: Rejections result in a 400 error - content: - application/json: - schema: - $ref: ./api/transaction/post-core-node-transactions-error.schema.json - example: - $ref: ./api/transaction/post-core-node-transactions-error.example.json - - /v2/contracts/interface/{contract_address}/{contract_name}: - get: - summary: Get contract interface - description: Get contract interface using a `contract_address` and `contract name` - tags: - - Smart Contracts - operationId: get_contract_interface - responses: - "200": - description: Contract interface - content: - application/json: - schema: - $ref: ./api/core-node/get-contract-interface.schema.json - example: - $ref: ./api/core-node/get-contract-interface.example.json - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). 
- /v2/map_entry/{contract_address}/{contract_name}/{map_name}: - post: - summary: Get specific data-map inside a contract - tags: - - Smart Contracts - operationId: get_contract_data_map_entry - description: | - Attempt to fetch data from a contract data map. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The map is identified with [Map Name]. - - The key to lookup in the map is supplied via the POST body. This should be supplied as the hex string serialization of the key (which should be a Clarity value). Note, this is a JSON string atom. - - In the response, `data` is the hex serialization of the map response. Note that map responses are Clarity option types, for non-existent values, this is a serialized none, and for all other responses, it is a serialized (some ...) object. - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-contract-data-map-entry.schema.json - example: - $ref: ./api/core-node/get-contract-data-map-entry.example.json - "400": - description: Failed loading data map - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: map_name - in: path - required: true - description: Map name - schema: - type: string - - name: proof - in: query - description: Returns object without the proof field when set to 0 - schema: - type: integer - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - x-codegen-request-body-name: key - requestBody: - description: Hex string serialization of the lookup key (which should be a Clarity value) - required: true - content: - application/json: - schema: - type: string - - /v2/contracts/source/{contract_address}/{contract_name}: - get: - summary: Get contract source - tags: - - Smart Contracts - operationId: get_contract_source - description: Returns the Clarity source code of a given contract, along with the block height it was published in, and the MARF proof for the data - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-contract-source.schema.json - example: - $ref: ./api/core-node/get-contract-source.example.json - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: proof - in: query - description: Returns object without the proof field if set to 0 - schema: - type: integer - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - required: false - - /v2/contracts/call-read/{contract_address}/{contract_name}/{function_name}: - post: - summary: Call read-only function - tags: - - Smart Contracts - operationId: call_read_only_function - description: | - Call a read-only public function on a given smart contract. - - The smart contract and function are specified using the URL path. 
The arguments and the simulated tx-sender are supplied via the POST body in the following JSON format: - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/contract/post-call-read-only-fn.schema.json - examples: - success: - $ref: ./api/contract/post-call-read-only-fn-success.example.json - fail: - $ref: ./api/contract/post-call-read-only-fn-fail.example.json - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: function_name - in: path - required: true - description: Function name - schema: - type: string - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - required: false - requestBody: - description: map of arguments and the simulated tx-sender where sender is either a Contract identifier or a normal Stacks address, and arguments is an array of hex serialized Clarity values. - required: true - content: - application/json: - schema: - $ref: './entities/contracts/read-only-function-args.schema.json' - example: - sender: 'SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0.get-info' - arguments: - - '0x0011...' - - '0x00231...' - - /v2/accounts/{principal}: - get: - summary: Get account info - tags: - - Accounts - operationId: get_account_info - description: | - Get the account data for the provided principal - - Where balance is the hex encoding of a unsigned 128-bit integer (big-endian), nonce is a unsigned 64-bit integer, and the proofs are provided as hex strings. - - For non-existent accounts, this does not 404, rather it returns an object with balance and nonce of 0. - parameters: - - name: principal - in: path - description: Stacks address or a Contract identifier (e.g. `SP31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZW97B5P0.get-info`) - required: true - schema: - type: string - - name: proof - in: query - description: Returns object without the proof field if set to 0 - schema: - type: integer - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-account-data.schema.json - example: - $ref: ./api/core-node/get-account-data.example.json - - /v2/fees/transaction: - post: - summary: Get approximate fees for the given transaction - tags: - - Fees - description: | - Get an estimated fee for the supplied transaction. This - estimates the execution cost of the transaction, the current - fee rate of the network, and returns estimates for fee - amounts. - - * `transaction_payload` is a hex-encoded serialization of - the TransactionPayload for the transaction. - * `estimated_len` is an optional argument that provides the - endpoint with an estimation of the final length (in bytes) - of the transaction, including any post-conditions and - signatures - - If the node cannot provide an estimate for the transaction - (e.g., if the node has never seen a contract-call for the - given contract and function) or if estimation is not - configured on this node, a 400 response is returned. 
- The 400 response will be a JSON error containing a `reason` - field which can be one of the following: - - * `DatabaseError` - this Stacks node has had an internal - database error while trying to estimate the costs of the - supplied transaction. - * `NoEstimateAvailable` - this Stacks node has not seen this - kind of contract-call before, and it cannot provide an - estimate yet. - * `CostEstimationDisabled` - this Stacks node does not perform - fee or cost estimation, and it cannot respond on this - endpoint. - - The 200 response contains the following data: - - * `estimated_cost` - the estimated multi-dimensional cost of - executing the Clarity VM on the provided transaction. - * `estimated_cost_scalar` - a unitless integer that the Stacks - node uses to compare how much of the block limit is consumed - by different transactions. This value incorporates the - estimated length of the transaction and the estimated - execution cost of the transaction. The range of this integer - may vary between different Stacks nodes. In order to compute - an estimate of total fee amount for the transaction, this - value is multiplied by the same Stacks node's estimated fee - rate. - * `cost_scalar_change_by_byte` - a float value that indicates how - much the `estimated_cost_scalar` value would increase for every - additional byte in the final transaction. - * `estimations` - an array of estimated fee rates and total fees to - pay in microSTX for the transaction. This array provides a range of - estimates (default: 3) that may be used. Each element of the array - contains the following fields: - * `fee_rate` - the estimated value for the current fee - rates in the network - * `fee` - the estimated value for the total fee in - microSTX that the given transaction should pay. These - values are the result of computing: - `fee_rate` x `estimated_cost_scalar`. - If the estimated fees are less than the minimum relay - fee `(1 ustx x estimated_len)`, then that minimum relay - fee will be returned here instead. - - - Note: If the final transaction's byte size is larger than - supplied to `estimated_len`, then applications should increase - this fee amount by: - - `fee_rate` x `cost_scalar_change_by_byte` x (`final_size` - `estimated_size`) - - operationId: post_fee_transaction - requestBody: - content: - application/json: - schema: - $ref: ./api/core-node/post-fee-transaction.schema.json - example: - $ref: ./api/core-node/post-fee-transaction.example.json - responses: - "200": - description: Estimated fees for the transaction - content: - application/json: - schema: - $ref: ./api/core-node/post-fee-transaction-response.schema.json - example: - $ref: ./api/core-node/post-fee-transaction-response.example.json - - /v2/fees/transfer: - get: - summary: Get estimated fee - tags: - - Fees - operationId: get_fee_transfer - description: Get an estimated fee rate for STX transfer transactions. 
This a a fee rate / byte, and is returned as a JSON integer - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-fee-transfer.schema.json - example: - $ref: ./api/core-node/get-fee-transfer.example.json - - /v2/info: - get: - summary: Get Core API info - description: Get Core API information - tags: - - Info - operationId: get_core_api_info - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-info.schema.json - example: - $ref: ./api/core-node/get-info.example.json - - /v2/pox: - get: - summary: Get PoX details - description: Get Proof of Transfer (PoX) information. Can be used for Stacking. - tags: - - Info - operationId: get_pox_info - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-pox.schema.json - example: - $ref: ./api/core-node/get-pox.example.json - parameters: - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - - /v2/traits/{contract_address}/{contract_name}/{trait_contract_address}/{trait_contract_name}/{trait_name}: - get: - summary: Get trait implementation details - description: Determine whether or not a specified trait is implemented (either explicitly or implicitly) within a given contract. - tags: - - Smart Contracts - operationId: get_is_trait_implemented - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/trait/get-is-trait-implemented.schema.json - example: - $ref: ./api/trait/get-is-trait-implemented.example.json - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: trait_contract_address - in: path - required: true - description: Trait Stacks address - schema: - type: string - - name: trait_contract_name - in: path - required: true - description: Trait contract name - schema: - type: string - - name: trait_name - in: path - required: true - description: Trait name - schema: - type: string - - name: tip - in: query - schema: - type: string - description: | - The Stacks chain tip to query from. - If tip == "latest", the query will be run from the latest known tip (includes unconfirmed state). - If the tip is left unspecified, the stacks chain tip will be selected (only includes confirmed state). - - /v2/clarity/marf/{clarity_marf_key}: - post: - summary: Get the MARF value for a given key - tags: - - Smart Contracts - operationId: get_clarity_marf_value - description: | - Attempt to fetch the value of a MARF key. - - In the response, `data` is the hex serialization of the value. - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-clarity-marf-value.schema.json - example: - $ref: ./api/core-node/get-clarity-marf-value.example.json - 400: - description: Failed to retrieve MARF key - parameters: - - name: clarity_marf_key - in: path - required: true - description: MARF key - schema: - type: string - - name: proof - in: query - description: Returns object without the proof field when set to 0 - schema: - type: integer - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. 
If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - - /v2/clarity/metadata/{contract_address}/{contract_name}/{clarity_metadata_key}: - post: - summary: Get the contract metadata for the metadata key - tags: - - Smart Contracts - operationId: get_clarity_metadata_key - description: | - Attempt to fetch the metadata of a contract. The contract is identified with [Contract Address] and [Contract Name] in the URL path. The metadata key is identified with [Clarity Metadata Key]. - - In the response, `data` is formatted as JSON. - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-clarity-metadata.schema.json - example: - $ref: ./api/core-node/get-clarity-metadata.example.json - 400: - description: Failed to retrieve constant value from contract - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: clarity_metadata_key - in: path - required: true - description: Metadata key - schema: - type: string - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - - /v2/constant_val/{contract_address}/{contract_name}/{constant_name}: - post: - summary: Get the value of a constant inside a contract - tags: - - Smart Contracts - operationId: get_constant_val - description: | - Attempt to fetch the value of a constant inside a contract. The contract is identified with [Stacks Address] and [Contract Name] in the URL path. The constant is identified with [Constant Name]. - - In the response, `data` is the hex serialization of the constant value. - responses: - "200": - description: Success - content: - application/json: - schema: - $ref: ./api/core-node/get-constant-val.schema.json - example: - $ref: ./api/core-node/get-constant-val.example.json - "400": - description: Failed to retrieve constant value from contract - parameters: - - name: contract_address - in: path - required: true - description: Stacks address - schema: - type: string - - name: contract_name - in: path - required: true - description: Contract name - schema: - type: string - - name: constant_name - in: path - required: true - description: Constant name - schema: - type: string - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest, the query will be run from the latest - known tip (includes unconfirmed state). - - /v3/block_proposal: - post: - summary: Validate a proposed Stacks block - tags: - - Mining - operationId: post_block_proposal - description: | - Used by stackers to validate a proposed Stacks block from a miner. - - **This API endpoint requires a basic Authorization header.** - responses: - "202": - description: Block proposal has been accepted for processing. - The result will be returned via the event observer. - content: - application/json: - example: - $ref: ./api/core-node/post-block-proposal-response.example.json - "400": - description: Endpoint not enabled. - "401": - description: Unauthorized. - "429": - description: There is an ongoing proposal validation being processed, - the new request cannot be accepted until the prior request has been processed. 
- content: - application/json: - example: - $ref: ./api/core-node/post-block-proposal-response.429.json - requestBody: - content: - application/json: - example: - $ref: ./api/core-node/post-block-proposal-req.example.json - - /v3/stacker_set/{cycle_number}: - get: - summary: Fetch the stacker and signer set information for a given cycle. - tags: - - Mining - operationId: get_stacker_set - description: | - Used to get stacker and signer set information for a given cycle. - - This will only return information for cycles started in Epoch-2.5 where PoX-4 was active and subsequent cycles. - parameters: - - name: cycle_number - in: path - required: true - description: reward cycle number - schema: - type: integer - responses: - "200": - description: Information for the given reward cycle - content: - application/json: - example: - $ref: ./api/core-node/get_stacker_set.example.json - "400": - description: Could not fetch the given reward set - content: - application/json: - example: - $ref: ./api/core-node/get_stacker_set.400.example.json - - /v3/blocks/{block_id}: - get: - summary: Fetch a Nakamoto block - tags: - - Blocks - operationId: get_block_v3 - description: - Fetch a Nakamoto block by its index block hash. - parameters: - - name: block_id - in: path - description: The block's ID hash - required: true - schema: - type: string - responses: - "200": - description: The raw SIP-003-encoded block will be returned. - content: - application/octet-stream: - schema: - type: string - format: binary - "404": - description: The block could not be found - content: - application/text-plain: {} - - /v3/blocks/height/{block_height}: - get: - summary: Fetch a Nakamoto block by its height and optional tip - tags: - - Blocks - operationId: get_block_v3_by_height - description: - Fetch a Nakamoto block by its height and optional tip. - parameters: - - name: block_height - in: path - description: The block's height - required: true - schema: - type: integer - - name: tip - in: query - schema: - type: string - description: The Stacks chain tip to query from. If tip == latest or empty, the query will be run - from the latest known tip. - responses: - "200": - description: The raw SIP-003-encoded block will be returned. - content: - application/octet-stream: - schema: - type: string - format: binary - "404": - description: The block could not be found - content: - application/text-plain: {} - - /v3/tenures/info: - get: - summary: Fetch metadata about the ongoing Nakamoto tenure - tags: - - Blocks - operationId: get_tenure_info - description: - Fetch metadata about the ongoing Nakamoto tenure. This information is sufficient to obtain and authenticate the highest complete tenure, as well as obtain new tenure blocks. - responses: - "200": - description: Metadata about the ongoing tenure - content: - application/json: - example: - $ref: ./api/core-node/get_tenure_info.json - - /v3/tenures/{block_id}: - get: - summary: Fetch a sequence of Nakamoto blocks in a tenure - tags: - - Blocks - operationId: get_tenures - description: - Fetch a sequence of Nakamoto blocks in a tenure. The blocks will be served in order from highest to lowest. The blocks will be encoded in their SIP-003 wire format, and concatenated together. 
- responses: - "200": - description: SIP-003-encoded Nakamoto blocks, concatenated together - content: - application/octet-stream: - schema: - type: string - format: binary - parameters: - - name: block_id - in: path - description: - The tenure-start block ID of the tenure to query - required: true - schema: - type: string - - name: stop - in: query - description: - The block ID hash of the highest block in this tenure that is already known to the caller. Neither the corresponding block nor any of its ancestors will be served. This is used to fetch tenure blocks that the caller does not have. - required: false - schema: - type: string - - /v3/sortitions/{lookup_kind}/{lookup}: - get: - summary: Fetch information about evaluated burnchain blocks (i.e., sortitions). - tags: - - Blocks - operationId: get_sortitions - description: - Fetch sortition information about a burnchain block. If the `lookup_kind` and `lookup` parameters are empty, it will return information about the latest burn block. - responses: - "200": - description: Information for the burn block or in the case of `latest_and_last`, multiple burn blocks - content: - application/json: - examples: - Latest: - description: A single element list is returned when just one sortition is requested - value: - $ref: ./api/core-node/get_sortitions.example.json - LatestAndLast: - description: Sortition information about the latest burn block with a winning miner, and the previous such burn block. - value: - $ref: ./api/core-node/get_sortitions_latest_and_prior.example.json - parameters: - - name: lookup_kind - in: path - description: |- - The style of lookup that should be performed. If not given, the most recent burn block processed will be returned. - Otherwise, the `lookup_kind` should be one of the following strings: - * `consensus` - find the burn block using the consensus hash supplied in the `lookup` field. - * `burn_height` - find the burn block using the burn block height supplied in the `lookup` field. - * `burn` - find the burn block using the burn block hash supplied in the `lookup` field. 
- * `latest_and_last` - return information about the latest burn block with a winning miner *and* the previous such burn block - required: false - schema: - type: string - - name: lookup - in: path - description: The value to use for the lookup if `lookup_kind` is `consensus`, `burn_height`, or `burn` - required: false - schema: - type: string - /v3/signer/{signer}/{cycle_number}: - get: - summary: Get number of blocks signed by signer during a given reward cycle - tags: - - Blocks - - Signers - operationId: get_signer - description: Get number of blocks signed by signer during a given reward cycle - parameters: - - name: signer - in: path - required: true - description: Hex-encoded compressed Secp256k1 public key of signer - schema: - type: string - - name: cycle_number - in: path - required: true - description: Reward cycle number - schema: - type: integer - responses: - 200: - description: Number of blocks signed - content: - text/plain: - schema: - type: integer - example: 7 From a072fe9c6f8fb238dcfe844c1c601e8e22b9d07f Mon Sep 17 00:00:00 2001 From: bestmike007 Date: Tue, 25 Feb 2025 18:07:22 +0000 Subject: [PATCH 030/238] feat: improve cost tracker performance --- clarity/src/vm/costs/mod.rs | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 65a0377cdf3..74beebeff2c 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -38,7 +38,7 @@ use crate::vm::types::Value::UInt; use crate::vm::types::{ FunctionType, PrincipalData, QualifiedContractIdentifier, TupleData, TypeSignature, }; -use crate::vm::{eval_all, ClarityName, SymbolicExpression, Value}; +use crate::vm::{CallStack, ClarityName, Environment, LocalContext, SymbolicExpression, Value}; pub mod constants; pub mod cost_functions; @@ -1054,7 +1054,7 @@ pub fn parse_cost( // TODO: add tests from mutation testing results #4832 #[cfg_attr(test, mutants::skip)] fn compute_cost( - cost_tracker: &mut TrackerData, + cost_tracker: &TrackerData, cost_function_reference: ClarityCostFunctionReference, input_sizes: &[u64], eval_in_epoch: StacksEpochId, @@ -1073,7 +1073,7 @@ fn compute_cost( let cost_contract = cost_tracker .cost_contracts - .get_mut(&cost_function_reference.contract_id) + .get(&cost_function_reference.contract_id) .ok_or(CostErrors::CostComputationFailed(format!( "CostFunction not found: {cost_function_reference}" )))?; @@ -1088,14 +1088,23 @@ fn compute_cost( ))); } - let function_invocation = [SymbolicExpression::list(program)]; + let function_invocation = SymbolicExpression::list(program); + let eval_result = global_context.execute(|global_context| { + let context = LocalContext::new(); + let mut call_stack = CallStack::new(); + let publisher: PrincipalData = cost_contract.contract_identifier.issuer.clone().into(); + let mut env = Environment::new( + global_context, + cost_contract, + &mut call_stack, + Some(publisher.clone()), + Some(publisher.clone()), + None, + ); - let eval_result = eval_all( - &function_invocation, - cost_contract, - &mut global_context, - None, - ); + let result = super::eval(&function_invocation, &mut env, &context)?; + Ok(Some(result)) + }); parse_cost(&cost_function_reference.to_string(), eval_result) } From 348c9cf1b45242a88c76365b8aa7b7681e157772 Mon Sep 17 00:00:00 2001 From: bestmike007 Date: Thu, 27 Feb 2025 18:20:44 +0000 Subject: [PATCH 031/238] chore: cost proptest unit tests from @kantai --- clarity/src/vm/contexts.rs | 5 +++++ clarity/src/vm/costs/mod.rs | 4 
++-- stackslib/src/clarity_vm/tests/costs.rs | 28 ++++++++++++++----------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 87c9d56de1c..69a68ea2602 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -799,6 +799,11 @@ impl<'a, 'hooks> OwnedEnvironment<'a, 'hooks> { self.context.cost_track.get_total() } + #[cfg(any(test, feature = "testing"))] + pub fn mut_cost_tracker(&mut self) -> &mut LimitedCostTracker { + &mut self.context.cost_track + } + /// Destroys this environment, returning ownership of its database reference. /// If the context wasn't top-level (i.e., it had uncommitted data), return None, /// because the database is not guaranteed to be in a sane state. diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 74beebeff2c..1d8806690a6 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -331,7 +331,7 @@ pub struct TrackerData { /// if the cost tracker is non-free, this holds the StacksEpochId that should be used to evaluate /// the Clarity cost functions. If the tracker *is* free, then those functions do not need to be /// evaluated, so no epoch identifier is necessary. - epoch: StacksEpochId, + pub epoch: StacksEpochId, mainnet: bool, chain_id: u32, } @@ -1053,7 +1053,7 @@ pub fn parse_cost( // TODO: add tests from mutation testing results #4832 #[cfg_attr(test, mutants::skip)] -fn compute_cost( +pub fn compute_cost( cost_tracker: &TrackerData, cost_function_reference: ClarityCostFunctionReference, input_sizes: &[u64], diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index 6868d11b654..f8f32bc6fb9 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -24,8 +24,9 @@ use clarity::vm::contexts::{ use clarity::vm::contracts::Contract; use clarity::vm::costs::cost_functions::ClarityCostFunction; use clarity::vm::costs::{ - parse_cost, ClarityCostFunctionEvaluator, ClarityCostFunctionReference, CostErrors, - DefaultVersion, ExecutionCost, LimitedCostTracker, COSTS_1_NAME, COSTS_2_NAME, COSTS_3_NAME, + compute_cost, parse_cost, ClarityCostFunctionEvaluator, ClarityCostFunctionReference, + CostErrors, DefaultVersion, ExecutionCost, LimitedCostTracker, COSTS_1_NAME, COSTS_2_NAME, + COSTS_3_NAME, }; use clarity::vm::database::{ClarityDatabase, MemoryBackingStore}; use clarity::vm::errors::{CheckErrors, Error, RuntimeErrorType}; @@ -885,19 +886,16 @@ fn eval_cost_fn( let mainnet = owned_env.is_mainnet(); let boot_costs_id = boot_code_id(cost_contract_name, mainnet); let cost_fn_name = cost_fn.get_name_str(); - - let exec = format!("({cost_fn_name} u{argument})"); - - let exec_result = owned_env - .eval_read_only(&boot_costs_id, &exec) - .map(|(value, _, _)| Some(value)); - + let cost_tracker = owned_env.mut_cost_tracker(); + let data = match cost_tracker { + LimitedCostTracker::Free => panic!(), + LimitedCostTracker::Limited(data) => data, + }; let clarity_cost_fn_ref = ClarityCostFunctionReference { contract_id: boot_costs_id, function_name: cost_fn_name.to_string(), }; - - parse_cost(&clarity_cost_fn_ref.to_string(), exec_result) + compute_cost(data, clarity_cost_fn_ref, &[argument], data.epoch) } fn eval_replaced_cost_fn( @@ -926,7 +924,13 @@ fn proptest_cost_fn(cost_fn: &ClarityCostFunction, cost_contract_name: &str) { inputs.push(2u64.pow(i) + 1); }); for use_mainnet in [true, false] { - with_owned_env(StacksEpochId::latest(), use_mainnet, |mut 
owned_env| { + let epoch = match cost_contract_name { + COSTS_1_NAME => StacksEpochId::Epoch20, + COSTS_2_NAME => StacksEpochId::Epoch2_05, + COSTS_3_NAME => StacksEpochId::latest(), + _ => panic!(), + }; + with_owned_env(epoch, use_mainnet, |mut owned_env| { for i in inputs.iter() { eprintln!("Evaluating {cost_contract_name}.{cost_fn}({i})"); let clar_evaled = eval_cost_fn(&mut owned_env, cost_contract_name, cost_fn, *i); From 512669be09a42a044289bc54cbc459fe623cf653 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 27 Feb 2025 16:01:28 -0500 Subject: [PATCH 032/238] fix: miner should not count repeat rejections Before this fix, when the miner is waiting for responses to its block proposal, if it received a block rejection more than once, the second and subsequent rejections would incorrectly count towards the 30% rejection threshold. This could cause the miner to prematurely treat the block as rejected, causing it to build and propose a new block. This change fixes that so that it only counts the response from each signer once. --- stacks-signer/src/v0/signer.rs | 66 +++++++++--- .../src/nakamoto_node/stackerdb_listener.rs | 12 ++- testnet/stacks-node/src/tests/signer/v0.rs | 101 ++++++++++++++++-- 3 files changed, 153 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e25d39d41a9..7599015c6e9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::sync::LazyLock; use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; @@ -22,10 +23,12 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, TOO_MANY_REQUESTS_STATUS, }; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::StacksPrivateKey; +use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use clarity::util::secp256k1::Secp256k1PublicKey; +use clarity::util::sleep_ms; +use clarity::util::tests::TestFlag; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, RejectReason, RejectReasonPrefix, SignerMessage, @@ -44,6 +47,11 @@ use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +/// A global variable that can be used to make signers repeat their proposal +/// response if their public key is in the provided list +pub static TEST_REPEAT_PROPOSAL_RESPONSE: LazyLock>> = + LazyLock::new(TestFlag::default); + /// Signer running mode (whether dry-run or real) #[derive(Debug)] pub enum SignerMode { @@ -464,6 +472,49 @@ impl Signer { } } + #[cfg(any(test, feature = "testing"))] + fn send_block_response(&mut self, block_response: BlockResponse) { + const NUM_REPEATS: usize = 1; + let mut count = 0; + let public_keys = TEST_REPEAT_PROPOSAL_RESPONSE.get(); + if !public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + count = NUM_REPEATS; + } + while count <= NUM_REPEATS { + let res = self + .stackerdb + .send_message_with_retry::(block_response.clone().into()); + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted 
=> warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + + count += 1; + sleep_ms(1000); + } + } + + #[cfg(not(any(test, feature = "testing")))] + fn send_block_response(&mut self, block_response: BlockResponse) { + let res = self + .stackerdb + .send_message_with_retry::(block_response.clone().into()); + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + } + /// Handle block proposal messages submitted to signers stackerdb fn handle_block_proposal( &mut self, @@ -575,18 +626,7 @@ impl Signer { if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation and do not store it. debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - let res = self - .stackerdb - .send_message_with_retry::(block_response.into()); - - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + self.send_block_response(block_response); } else { // Just in case check if the last block validation submission timed out. self.check_submitted_block_proposal(); diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index c05e221610a..54ff73dfbae 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -384,11 +384,13 @@ impl StackerDBListener { continue; } }; - block.responded_signers.insert(rejected_pubkey); - block.total_weight_rejected = block - .total_weight_rejected - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); + + if block.responded_signers.insert(rejected_pubkey) { + block.total_weight_rejected = block + .total_weight_rejected + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + } info!("StackerDBListener: Signer rejected block"; "block_signer_sighash" => %rejected_data.signer_signature_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3485e53c5d8..bb47f50e3e0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -68,6 +68,7 @@ use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::client::{SignerSlotID, StackerDB}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::signerdb::SignerDb; +use stacks_signer::v0::signer::TEST_REPEAT_PROPOSAL_RESPONSE; use stacks_signer::v0::tests::{ TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL, TEST_SKIP_BLOCK_BROADCAST, TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION, @@ -3038,12 +3039,13 @@ fn retry_on_rejection() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); - loop { + wait_for(60, || { if proposed_blocks.load(Ordering::SeqCst) > 
proposals_before { - break; + return Ok(true); } - std::thread::sleep(Duration::from_millis(100)); - } + Ok(false) + }) + .expect("Timed out waiting for block proposal"); info!("Block proposed, verifying that it is not processed"); // Wait 10 seconds to be sure that the timeout has occurred @@ -3053,12 +3055,15 @@ fn retry_on_rejection() { // resume signing info!("Disable unconditional rejection and wait for the block to be processed"); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); - loop { + + wait_for(60, || { if mined_blocks.load(Ordering::SeqCst) > blocks_before { - break; + return Ok(true); } - std::thread::sleep(Duration::from_millis(100)); - } + Ok(false) + }) + .expect("Timed out waiting for block to be mined"); + signer_test.shutdown(); } @@ -12085,3 +12090,83 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() { } miners.shutdown(); } + +#[test] +#[ignore] +/// This test checks that the miner ignore repeat block rejections. +fn repeated_rejection() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + let proposed_blocks = signer_test + .running_nodes + .counters + .naka_proposed_blocks + .clone(); + + signer_test.mine_nakamoto_block(Duration::from_secs(60), true); + + // make signer[0] reject all proposals and to repeat the rejection + let rejecting_signer = + StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]); + TEST_REPEAT_PROPOSAL_RESPONSE.set(vec![rejecting_signer]); + + // make signer[1] ignore all proposals + let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]); + + let proposals_before = proposed_blocks.load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(60, || { + if proposed_blocks.load(Ordering::SeqCst) > proposals_before { + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for block proposal"); + + let proposals_after = proposed_blocks.load(Ordering::SeqCst); + info!("Block proposed, verifying that it is not rejected"); + + // Ensure that the miner does not propose any more blocks + _ = wait_for(60, || { + assert_eq!( + proposed_blocks.load(Ordering::SeqCst), + proposals_after, + "Miner proposed another block" + ); + Ok(false) + }); + + signer_test.shutdown(); +} From e4e5cd0d3643f6107f78da4afb0218764832419b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 27 Feb 2025 22:11:21 -0500 Subject: [PATCH 033/238] feat: on timeout, re-propose the same block When a miner times out waiting for signatures, 
instead of proposing a new block, it should only re-propose the same block. Proposing a new block is guaranteed to fail because signers that approved the original block will reject any new block at the same height. This implements the miner side of #5856. A change is still needed on the signer side to allow a signer to accept a block that it previously rejected. --- testnet/stacks-node/src/nakamoto_node.rs | 2 + .../src/nakamoto_node/signer_coordinator.rs | 118 ++++++++-------- .../src/nakamoto_node/stackerdb_listener.rs | 29 +++- testnet/stacks-node/src/tests/signer/v0.rs | 127 ++++++++++++++++++ 4 files changed, 220 insertions(+), 56 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index c49e0bbc731..b06dafbd023 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -143,6 +143,8 @@ pub enum Error { /// NetError wrapper #[error("NetError: {0}")] NetError(#[from] NetError), + #[error("Timed out waiting for signatures")] + SignatureTimeout, } impl StacksNode { diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 25c0421e83f..b705fb4ddad 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -254,46 +254,54 @@ impl SignerCoordinator { }; let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); - debug!("Sending block proposal message to signers"; - "signer_signature_hash" => %block.header.signer_signature_hash(), - ); - Self::send_miners_message::( - &self.message_key, - sortdb, - election_sortition, - stackerdbs, - block_proposal_message, - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - &election_sortition.consensus_hash, - )?; - counters.bump_naka_proposed_blocks(); - #[cfg(test)] - { - info!( - "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + loop { + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), ); - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - if let Some(signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + Self::send_miners_message::( + &self.message_key, + sortdb, + election_sortition, + stackerdbs, + block_proposal_message.clone(), + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + &election_sortition.consensus_hash, + )?; + counters.bump_naka_proposed_blocks(); + + #[cfg(test)] { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signatures); + info!( + "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. 
+ if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } } - } - self.get_block_status( - &block.header.signer_signature_hash(), - &block.block_id(), - block.header.parent_block_id, - chain_state, - sortdb, - counters, - ) + let res = self.get_block_status( + &block.header.signer_signature_hash(), + &block.block_id(), + block.header.parent_block_id, + chain_state, + sortdb, + counters, + ); + + match res { + Err(NakamotoNodeError::SignatureTimeout) => continue, + _ => return res, + } + } } /// Get the block status for a given block hash. @@ -340,7 +348,7 @@ impl SignerCoordinator { if rejections_timer.elapsed() > *rejections_timeout { return false; } - // number or rejections changed? + // number of rejections changed? if status.total_weight_rejected != rejections { return false; } @@ -353,7 +361,7 @@ impl SignerCoordinator { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in // the staging db. - debug!("SignerCoordinator: Timeout waiting for block signatures"); + debug!("SignerCoordinator: Intermediate timeout waiting for block status"); // Look in the nakamoto staging db -- a block can only get stored there // if it has enough signing weight to clear the threshold. @@ -380,15 +388,17 @@ impl SignerCoordinator { } if rejections_timer.elapsed() > *rejections_timeout { - warn!("Timed out while waiting for responses from signers"; - "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), - "rejections" => rejections, - "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + warn!("Timed out while waiting for responses from signers, resending proposal"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Timed out while waiting for signatures".into(), - )); + + // Reset the rejections in the stackerdb listener + self.stackerdb_comms.reset_rejections(block_signer_sighash); + + return Err(NakamotoNodeError::SignatureTimeout); } // Check if a new Stacks block has arrived in the parent tenure @@ -399,7 +409,7 @@ impl SignerCoordinator { )? 
.ok_or(NakamotoNodeError::UnexpectedChainState)?; if highest_in_tenure.index_block_hash() != parent_block_id { - debug!("SignCoordinator: Exiting due to new stacks tip"); + info!("SignCoordinator: Exiting due to new stacks tip"); return Err(NakamotoNodeError::StacksTipChanged); } @@ -448,14 +458,16 @@ impl SignerCoordinator { return Ok(block_status.gathered_signatures.values().cloned().collect()); } else if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for responses from signers"; - "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), - "rejections" => rejections, - "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Timed out while waiting for signatures".into(), - )); + + // Reset the rejections in the stackerdb listener + self.stackerdb_comms.reset_rejections(block_signer_sighash); + + return Err(NakamotoNodeError::SignatureTimeout); } else { continue; } diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 54ff73dfbae..a0225bc7efd 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -52,9 +52,13 @@ pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); #[derive(Debug, Clone)] pub struct BlockStatus { - pub responded_signers: HashSet, + /// Set of the slot ids of signers who have responded + pub responded_signers: HashSet, + /// Map of the slot id of signers who have signed the block and their signature pub gathered_signatures: BTreeMap, + /// Total weight of signers who have signed the block pub total_weight_approved: u32, + /// Total weight of signers who have rejected the block pub total_weight_rejected: u32, } @@ -342,7 +346,7 @@ impl StackerDBListener { "server_version" => metadata.server_version, ); block.gathered_signatures.insert(slot_id, signature); - block.responded_signers.insert(signer_pubkey); + block.responded_signers.insert(slot_id); if block.total_weight_approved >= self.weight_threshold { // Signal to anyone waiting on this block that we have enough signatures @@ -385,7 +389,7 @@ impl StackerDBListener { } }; - if block.responded_signers.insert(rejected_pubkey) { + if block.responded_signers.insert(slot_id) { block.total_weight_rejected = block .total_weight_rejected .checked_add(signer_entry.weight) @@ -498,6 +502,25 @@ impl StackerDBListenerComms { blocks.insert(block.signer_signature_hash(), block_status); } + /// Reset rejections for a block proposal. + /// This is used when a block proposal times out and we need to retry it by + /// clearing the block's rejections. Block approvals cannot be cleared + /// because an old approval could always be used to make a block reach + /// the approval threshold. 
+ pub fn reset_rejections(&self, signer_sighash: &Sha512Trunc256Sum) { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + if let Some(block) = blocks.get_mut(signer_sighash) { + block.responded_signers.clear(); + block.total_weight_rejected = 0; + + // Add approving signers back to the responded signers set + for (slot_id, _) in block.gathered_signatures.iter() { + block.responded_signers.insert(*slot_id); + } + } + } + /// Get the status for `block` from the Stacker DB listener. /// If the block is not found in the map, return an error. /// If the block is found, call `condition` to check if the block status diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index bb47f50e3e0..96b373c52d3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12170,3 +12170,130 @@ fn repeated_rejection() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test verifies that a miner will re-propose the same block if it times +/// out waiting for signers to reach consensus on the block. +fn retry_proposal() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * 3)], + |_| {}, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(123)); + config + .miner + .block_rejection_timeout_steps + .insert(10, Duration::from_secs(20)); + config + .miner + .block_rejection_timeout_steps + .insert(15, Duration::from_secs(10)); + config + .miner + .block_rejection_timeout_steps + .insert(20, Duration::from_secs(30)); + }, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + let proposed_blocks = signer_test + .running_nodes + .counters + .naka_proposed_blocks + .clone(); + + signer_test.mine_nakamoto_block(Duration::from_secs(60), true); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let block_height_before = info.stacks_tip_height; + + // make signer[0] reject all proposals + let rejecting_signer = + StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]); + + // make signer[1] ignore all proposals + let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]); + + let proposals_before = proposed_blocks.load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(60, || { + if proposed_blocks.load(Ordering::SeqCst) > 
proposals_before { + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for block proposal"); + + info!( + "Block proposed, submitting another transaction that should not get included in the block" + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 1, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Disable signer 1 from ignoring proposals"); + test_observer::clear(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); + + info!("Waiting for the block to be approved"); + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + if info.stacks_tip_height > block_height_before { + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for block"); + + // Ensure that the block was the original block with just 1 transfer + let blocks = test_observer::get_blocks(); + let block = blocks.first().expect("No blocks found"); + let transactions = block["transactions"].as_array().unwrap(); + assert_eq!(transactions.len(), 1); + + signer_test.shutdown(); +} From 5468683fef4fab16674f264d37e011523aaffe6f Mon Sep 17 00:00:00 2001 From: Adriano Di Luzio Date: Fri, 28 Feb 2025 09:41:40 +0100 Subject: [PATCH 034/238] Move to `contrib/nix` --- contrib/nix/README.md | 16 +++++++++ flake.lock => contrib/nix/flake.lock | 18 +++++----- flake.nix => contrib/nix/flake.nix | 54 ++++++++++++++-------------- 3 files changed, 52 insertions(+), 36 deletions(-) create mode 100644 contrib/nix/README.md rename flake.lock => contrib/nix/flake.lock (79%) rename flake.nix => contrib/nix/flake.nix (73%) diff --git a/contrib/nix/README.md b/contrib/nix/README.md new file mode 100644 index 00000000000..7c47c2357cb --- /dev/null +++ b/contrib/nix/README.md @@ -0,0 +1,16 @@ +# `nix` flake + +Build `stacks-node` and `stacks-signer` by pointing to the `flake.nix` file in +this directory. For instance, from the root directory: `nix build +'./contrib/nix'`. + +## Using `direnv` + +If using `direnv`, from the root directory of this repository: + +```bash +echo "use flake ./contrib/nix/" > .envrc +direnv allow +``` + +This will provide a `sh` environment with required dependencies (e.g., `bitcoind`) available. 
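For one-off builds of a single crate, the flake's per-package outputs can also be
addressed directly. A minimal sketch, assuming the output attribute names match the
`pname` values set in `flake.nix` (`stacks-signer` for the signer package; the exact
attribute names are an assumption, not a documented interface):

```bash
# Build only the signer; the `stacks-signer` output name is assumed from the
# `pname` in contrib/nix/flake.nix.
nix build './contrib/nix#stacks-signer'

# Enter an ephemeral shell with the build dependencies, without direnv; this
# assumes the flake exposes a default devShell (implied by `use flake` above).
nix develop './contrib/nix'
```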
diff --git a/flake.lock b/contrib/nix/flake.lock similarity index 79% rename from flake.lock rename to contrib/nix/flake.lock index 3284405275f..69951ab2969 100644 --- a/flake.lock +++ b/contrib/nix/flake.lock @@ -2,11 +2,11 @@ "nodes": { "crane": { "locked": { - "lastModified": 1739053031, - "narHash": "sha256-LrMDRuwAlRFD2T4MgBSRd1s2VtOE+Vl1oMCNu3RpPE0=", + "lastModified": 1739936662, + "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", "owner": "ipetkov", "repo": "crane", - "rev": "112e6591b2d6313b1bd05a80a754a8ee42432a7e", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", "type": "github" }, "original": { @@ -37,11 +37,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1739419412, - "narHash": "sha256-NCWZQg4DbYVFWg+MOFrxWRaVsLA7yvRWAf6o0xPR1hI=", + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "2d55b4c1531187926c2a423f6940b3b1301399b5", + "rev": "3a05eebede89661660945da1f151959900903b6a", "type": "github" }, "original": { @@ -67,11 +67,11 @@ ] }, "locked": { - "lastModified": 1739413688, - "narHash": "sha256-57OAXXYhOibG7Rqhhr4ecI1H8mtDJB2uj0T8rbQVGLY=", + "lastModified": 1740709839, + "narHash": "sha256-4dF++MXIXna/AwlZWDKr7bgUmY4xoEwvkF1GewjNrt0=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "675a6427d505f140dab8c56379afb66d4f55800b", + "rev": "b4270835bf43c6f80285adac6f66a26d83f0f277", "type": "github" }, "original": { diff --git a/flake.nix b/contrib/nix/flake.nix similarity index 73% rename from flake.nix rename to contrib/nix/flake.nix index ea94db3105b..afd88e8a61f 100644 --- a/flake.nix +++ b/contrib/nix/flake.nix @@ -39,12 +39,12 @@ inherit (pkgs) lib; - toolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain; + toolchain = pkgs.rust-bin.fromRustupToolchainFile ../../rust-toolchain; craneLib = (crane.mkLib pkgs).overrideToolchain toolchain; name = "stacks-core"; - versions = (builtins.fromTOML (builtins.readFile ./versions.toml)); + versions = (builtins.fromTOML (builtins.readFile ../../versions.toml)); version = versions.stacks_node_version; # Common arguments can be set here to avoid repeating them later @@ -68,7 +68,7 @@ // { inherit version; pname = name; - src = fileSetForCrate ./.; + src = fileSetForCrate ../..; } ); @@ -83,35 +83,35 @@ fileSetForCrate = crate: lib.fileset.toSource { - root = ./.; + root = ../..; fileset = lib.fileset.unions [ - ./Cargo.toml - ./Cargo.lock + ../../Cargo.toml + ../../Cargo.lock # - ./versions.toml + ../../versions.toml # - ./stx-genesis/name_zonefiles.txt - ./stx-genesis/name_zonefiles.txt.sha256 - ./stx-genesis/name_zonefiles-test.txt - ./stx-genesis/name_zonefiles-test.txt.sha256 - ./stx-genesis/chainstate.txt - ./stx-genesis/chainstate.txt.sha256 - ./stx-genesis/chainstate-test.txt - ./stx-genesis/chainstate-test.txt.sha256 + ../../stx-genesis/name_zonefiles.txt + ../../stx-genesis/name_zonefiles.txt.sha256 + ../../stx-genesis/name_zonefiles-test.txt + ../../stx-genesis/name_zonefiles-test.txt.sha256 + ../../stx-genesis/chainstate.txt + ../../stx-genesis/chainstate.txt.sha256 + ../../stx-genesis/chainstate-test.txt + ../../stx-genesis/chainstate-test.txt.sha256 # (craneLib.fileset.commonCargoSources crate) # - (lib.fileset.fileFilter (file: file.hasExt "clar") ./.) + (lib.fileset.fileFilter (file: file.hasExt "clar") ../..) 
# - (craneLib.fileset.commonCargoSources ./clarity) - (craneLib.fileset.commonCargoSources ./contrib/tools/relay-server) - (craneLib.fileset.commonCargoSources ./libsigner) - (craneLib.fileset.commonCargoSources ./libstackerdb) - (craneLib.fileset.commonCargoSources ./pox-locking) - (craneLib.fileset.commonCargoSources ./stacks-common) - (craneLib.fileset.commonCargoSources ./stackslib) - (craneLib.fileset.commonCargoSources ./stx-genesis) - (craneLib.fileset.commonCargoSources ./testnet/stacks-node) + (craneLib.fileset.commonCargoSources ../../clarity) + (craneLib.fileset.commonCargoSources ../../contrib/tools/relay-server) + (craneLib.fileset.commonCargoSources ../../libsigner) + (craneLib.fileset.commonCargoSources ../../libstackerdb) + (craneLib.fileset.commonCargoSources ../../pox-locking) + (craneLib.fileset.commonCargoSources ../../stacks-common) + (craneLib.fileset.commonCargoSources ../../stackslib) + (craneLib.fileset.commonCargoSources ../../stx-genesis) + (craneLib.fileset.commonCargoSources ../../testnet/stacks-node) ]; }; @@ -122,7 +122,7 @@ pname = "stacks-signer"; cargoFeatures = "--features monitoring_prom"; cargoExtraArgs = "${cargoFeatures} -p ${pname}"; - src = fileSetForCrate ./stacks-signer; + src = fileSetForCrate ../../stacks-signer; } ); @@ -136,7 +136,7 @@ pname = name; cargoFeatures = "--features monitoring_prom,slog_json"; cargoExtraArgs = "${cargoFeatures}"; - src = fileSetForCrate ./.; + src = fileSetForCrate ../..; } ); in From 221c92cc701fc7bb219285427db3624d60e9517c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Mar 2025 10:16:07 -0500 Subject: [PATCH 035/238] chore: add parent burn block height to commit log --- stackslib/src/chainstate/burn/db/sortdb.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 29ed43e0451..a8eb83eb250 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -5645,7 +5645,8 @@ impl SortitionHandleTx<'_> { "ACCEPTED({}) leader block commit {} at {},{}", op.block_height, &op.txid, op.block_height, op.vtxindex; "apparent_sender" => %op.apparent_sender, - "stacks_block_hash" => %op.block_header_hash + "stacks_block_hash" => %op.block_header_hash, + "parent_burn_block" => %op.parent_block_ptr ); self.insert_block_commit(op, sort_id) } From 0799e268fe312cadadf6363d29e80cbe7cf2fbf4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Feb 2025 20:55:37 -0500 Subject: [PATCH 036/238] fix: add test gating for imports --- stacks-signer/src/v0/signer.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 7599015c6e9..de4718f5c78 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +#[cfg(any(test, feature = "testing"))] use std::sync::LazyLock; use std::time::{Duration, Instant}; @@ -23,11 +24,15 @@ use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, TOO_MANY_REQUESTS_STATUS, }; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::{StacksPrivateKey, StacksPublicKey}; +use clarity::types::chainstate::StacksPrivateKey; +#[cfg(any(test, feature = "testing"))] +use clarity::types::chainstate::StacksPublicKey; use clarity::types::{PrivateKey, StacksEpochId}; use 
clarity::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; use clarity::util::secp256k1::Secp256k1PublicKey; +#[cfg(any(test, feature = "testing"))] use clarity::util::sleep_ms; +#[cfg(any(test, feature = "testing"))] use clarity::util::tests::TestFlag; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MockProposal, MockSignature, @@ -49,6 +54,7 @@ use crate::Signer as SignerTrait; /// A global variable that can be used to make signers repeat their proposal /// response if their public key is in the provided list +#[cfg(any(test, feature = "testing"))] pub static TEST_REPEAT_PROPOSAL_RESPONSE: LazyLock>> = LazyLock::new(TestFlag::default); From b543726e9cf8e5f8e0679c4c1af3b8750d2fbbe4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Mar 2025 13:28:26 -0500 Subject: [PATCH 037/238] test: resolve flakiness in `global_acceptance_depends_on_block_announcement` --- testnet/stacks-node/src/tests/signer/v0.rs | 32 +++++++++------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 96b373c52d3..654499a32eb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -8421,28 +8421,22 @@ fn global_acceptance_depends_on_block_announcement() { ); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); - TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); TEST_IGNORE_SIGNERS.set(false); - TEST_SKIP_BLOCK_BROADCAST.set(false); test_observer::clear(); - let info_before = signer_test.get_peer_info(); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let info = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info.stacks_tip_height > info_before.stacks_tip_height - && info_before.stacks_tip_consensus_hash != info.stacks_tip_consensus_hash) - }, - ) - .expect("Stacks miner failed to produce new blocks during the newest burn block's tenure"); - let sister_block = - wait_for_block_pushed_by_miner_key(30, info_before.stacks_tip_height + 1, &miner_pk) - .expect("Timed out waiting for block N+1' to be mined"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + let sister_block = wait_for_block_proposal(30, info_before.stacks_tip_height + 1, &miner_pk) + .expect("Timed out waiting for block N+1' to be proposed"); + + TEST_SKIP_BLOCK_ANNOUNCEMENT.set(false); + TEST_SKIP_BLOCK_BROADCAST.set(false); + + wait_for_block_pushed(30, sister_block.header.signer_signature_hash()) + .expect("Timed out waiting for block N+1' to be mined"); assert_ne!( sister_block.header.signer_signature_hash(), block_n_1.header.signer_signature_hash() From fe048ad5819539ffffdffc0eb81b89efec952a34 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 27 Feb 2025 10:43:24 -0500 Subject: [PATCH 038/238] chore: bump versions to 3.1.0.0.7 --- CHANGELOG.md | 2 +- stacks-signer/CHANGELOG.md | 2 +- versions.toml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 159d7a8d9af..d649981962e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
-## [Unreleased] +## [3.1.0.0.7] ## Added diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 4dff38b686d..e8307b48374 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [Unreleased] +## [3.1.0.0.7.0] ## Changed diff --git a/versions.toml b/versions.toml index 1d54eb4fc10..138c89c7173 100644 --- a/versions.toml +++ b/versions.toml @@ -1,4 +1,4 @@ # Update these values when a new release is created. # `stacks-common/build.rs` will automatically update `versions.rs` with these values. -stacks_node_version = "3.1.0.0.6" -stacks_signer_version = "3.1.0.0.6.0" +stacks_node_version = "3.1.0.0.7" +stacks_signer_version = "3.1.0.0.7.0" From 8db9e5e495af57f5ea1c128505a056810999a2ee Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Mar 2025 13:38:50 -0500 Subject: [PATCH 039/238] docs: add `## [Unreleased]` to changelogs --- CHANGELOG.md | 2 ++ stacks-signer/CHANGELOG.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d649981962e..ea39bd441ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + ## [3.1.0.0.7] ## Added diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index e8307b48374..59fe85aee6e 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,6 +5,8 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + ## [3.1.0.0.7.0] ## Changed From b746bef09b60c8743327faba9787a9cde53a3712 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Mar 2025 16:16:47 -0500 Subject: [PATCH 040/238] test: fix flakiness in `nakamoto_lockup_events` --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f1117be811e..ebd2bc5c4e5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -9645,12 +9645,14 @@ fn nakamoto_lockup_events() { wait_for(30, || Ok(get_stacks_height() > height_before)).unwrap(); } + wait_for(30, || { + let blocks = test_observer::get_blocks(); + let block = blocks.last().unwrap(); + Ok(block.get("block_height").unwrap().as_u64().unwrap() == unlock_height) + }) + .expect("Timed out waiting for test observer to reach unlock height"); let blocks = test_observer::get_blocks(); let block = blocks.last().unwrap(); - assert_eq!( - block.get("block_height").unwrap().as_u64().unwrap(), - unlock_height - ); let events = block.get("events").unwrap().as_array().unwrap(); let mut found_event = false; From e3f9dfeb3d557053fae49ad02005278babc5ddb6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 3 Mar 2025 17:24:44 -0500 Subject: [PATCH 041/238] test: reduce flakiness in `retry_proposal` --- testnet/stacks-node/src/tests/signer/v0.rs | 26 +++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 654499a32eb..f85be74d095 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12165,6 +12165,20 @@ fn repeated_rejection() { signer_test.shutdown(); } +fn transfers_in_block(block: &serde_json::Value) -> usize { + let transactions = block["transactions"].as_array().unwrap(); + let mut count = 0; + for tx in transactions { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::TokenTransfer(..) 
= &parsed.payload {
+            count += 1;
+        }
+    }
+    count
+}
+
 #[test]
 #[ignore]
 /// This test verifies that a miner will re-propose the same block if it times
@@ -12270,13 +12284,14 @@ fn retry_proposal() {
     submit_tx(&http_origin, &transfer_tx);
 
     info!("Disable signer 1 from ignoring proposals");
-    test_observer::clear();
     TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]);
 
     info!("Waiting for the block to be approved");
     wait_for(60, || {
-        let info = get_chain_info(&signer_test.running_nodes.conf);
-        if info.stacks_tip_height > block_height_before {
+        let blocks = test_observer::get_blocks();
+        let last_block = blocks.last().expect("No blocks found");
+        let height = last_block["block_height"].as_u64().unwrap();
+        if height > block_height_before {
             return Ok(true);
         }
         Ok(false)
@@ -12285,9 +12300,8 @@ fn retry_proposal() {
 
     // Ensure that the block was the original block with just 1 transfer
     let blocks = test_observer::get_blocks();
-    let block = blocks.first().expect("No blocks found");
-    let transactions = block["transactions"].as_array().unwrap();
-    assert_eq!(transactions.len(), 1);
+    let block = blocks.last().expect("No blocks found");
+    assert_eq!(transfers_in_block(block), 1);
 
     signer_test.shutdown();
 }

From 8b4ec39211aa807fae1dc12658db0836acd4ffb3 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 19 Dec 2024 15:52:54 -0500
Subject: [PATCH 042/238] feat: redesign nonce cache

This redesign uses a proper LRU cache and flushes it to the database
more efficiently.
---
 stacks-common/src/types/sqlite.rs             |   9 +-
 stacks-common/src/util/lru_cache.rs           | 256 ++++++++++++++++++
 stacks-common/src/util/mod.rs                 |   1 +
 .../stacks/tests/block_construction.rs        |   2 +-
 stackslib/src/config/mod.rs                   |   8 +-
 stackslib/src/core/mempool.rs                 | 199 ++------------
 stackslib/src/core/mod.rs                     |   1 +
 stackslib/src/core/nonce_cache.rs             | 253 +++++++++++++++++
 .../stacks-node/src/nakamoto_node/miner.rs    |   7 +
 9 files changed, 548 insertions(+), 188 deletions(-)
 create mode 100644 stacks-common/src/util/lru_cache.rs
 create mode 100644 stackslib/src/core/nonce_cache.rs

diff --git a/stacks-common/src/types/sqlite.rs b/stacks-common/src/types/sqlite.rs
index 183ec61fbc6..57010ea118e 100644
--- a/stacks-common/src/types/sqlite.rs
+++ b/stacks-common/src/types/sqlite.rs
@@ -16,7 +16,7 @@
 use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
 
-use super::chainstate::VRFSeed;
+use super::chainstate::{StacksAddress, VRFSeed};
 use crate::deps_common::bitcoin::util::hash::Sha256dHash;
 use crate::types::chainstate::{
     BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId, TrieHash,
@@ -42,6 +42,13 @@ impl ToSql for Sha256dHash {
     }
 }
 
+impl rusqlite::types::ToSql for StacksAddress {
+    fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
+        let addr_str = self.to_string();
+        Ok(addr_str.into())
+    }
+}
+
 // Implement rusqlite traits for a bunch of structs that used to be defined
 // in the chainstate code
 impl_byte_array_rusqlite_only!(ConsensusHash);
diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
new file mode 100644
index 00000000000..97b55e69bc1
--- /dev/null
+++ b/stacks-common/src/util/lru_cache.rs
@@ -0,0 +1,256 @@
+// Copyright (C) 2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::HashMap; + +/// Node in the doubly linked list +struct Node { + key: K, + value: V, + dirty: bool, + next: usize, + prev: usize, +} + +/// LRU cache for account nonces +pub struct LruCache { + capacity: usize, + /// Map from address to an offset in the linked list + cache: HashMap, + /// Doubly linked list of values in order of most recently used + order: Vec>, + /// Index of the head of the linked list -- the most recently used element + head: usize, + /// Index of the tail of the linked list -- the least recently used element + tail: usize, +} + +impl LruCache { + /// Create a new LRU cache with the given capacity + pub fn new(capacity: usize) -> Self { + LruCache { + capacity, + cache: HashMap::new(), + order: Vec::with_capacity(capacity), + head: capacity, + tail: capacity, + } + } + + /// Get the value for the given key + pub fn get(&mut self, key: &K) -> Option { + if let Some(node) = self.cache.get(key) { + // Move the node to the head of the LRU list + let node = *node; + + if node != self.head { + let prev = self.order[node].prev; + let next = self.order[node].next; + + if node == self.tail { + // If this is the tail, update the tail + self.tail = prev; + } else { + // Else, update the next node's prev pointer + self.order[next].prev = prev; + } + + self.order[prev].next = next; + self.order[node].prev = self.capacity; + self.order[node].next = self.head; + self.order[self.head].prev = node; + self.head = node; + } + + Some(self.order[node].value) + } else { + None + } + } + + /// Insert a key-value pair into the cache, marking it as dirty. + /// Returns `Some((K, V))` if a dirty value was evicted. + pub fn insert(&mut self, key: K, value: V) -> Option<(K, V)> { + self.insert_with_dirty(key, value, true) + } + + /// Insert a key-value pair into the cache, marking it as clean. + /// Returns `Some((K, V))` if a dirty value was evicted. + pub fn insert_clean(&mut self, key: K, value: V) -> Option<(K, V)> { + self.insert_with_dirty(key, value, false) + } + + /// Insert a key-value pair into the cache + /// Returns `Some((K, V))` if a dirty value was evicted. + pub fn insert_with_dirty(&mut self, key: K, value: V, dirty: bool) -> Option<(K, V)> { + let mut evicted = None; + if let Some(node) = self.cache.get(&key) { + // Update the value for the key + let node = *node; + self.order[node].value = value; + self.order[node].dirty = dirty; + + // Just call get to handle updating the LRU list + self.get(&key); + } else { + let index = if self.cache.len() == self.capacity { + // Take the place of the least recently used element. 
+ // First, remove it from the tail of the LRU list + let index = self.tail; + let prev = self.order[index].prev; + self.order[prev].next = self.capacity; + self.tail = prev; + + // Remove it from the cache + self.cache.remove(&self.order[index].key); + + // If it is dirty, save the key-value pair to return + if self.order[index].dirty { + evicted = Some(( + std::mem::replace(&mut self.order[index].key, key.clone()), + self.order[index].value, + )); + } + + // Insert this new value into the cache + self.cache.insert(key, index); + + // Update the node with the new key-value pair, inserting it at + // the head of the LRU list + self.order[index].value = value; + self.order[index].dirty = dirty; + self.order[index].next = self.head; + self.order[index].prev = self.capacity; + + index + } else { + // Insert a new key-value pair + let node = Node { + key: key.clone(), + value, + dirty: dirty, + next: self.head, + prev: self.capacity, + }; + + let index = self.order.len(); + self.order.push(node); + self.cache.insert(key, index); + + index + }; + + // Put it at the head of the LRU list + if self.head != self.capacity { + self.order[self.head].prev = index; + } else { + self.tail = index; + } + + self.head = index; + } + evicted + } + + pub fn flush(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> { + let mut index = self.head; + while index != self.capacity { + println!("checking {index}, dirty? {}", self.order[index].dirty); + let next = self.order[index].next; + if self.order[index].dirty { + let value = self.order[index].value; + f(&self.order[index].key, value)?; + self.order[index].dirty = false; + } + index = next; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_lru_cache() { + let mut cache = LruCache::new(2); + + cache.insert(1, 1); + cache.insert(2, 2); + assert_eq!(cache.get(&1), Some(1)); + cache.insert(3, 3); + assert_eq!(cache.get(&2), None); + cache.insert(4, 4); + assert_eq!(cache.get(&1), None); + assert_eq!(cache.get(&3), Some(3)); + assert_eq!(cache.get(&4), Some(4)); + } + + #[test] + fn test_lru_cache_update() { + let mut cache = LruCache::new(2); + + cache.insert(1, 1); + cache.insert(2, 2); + cache.insert(1, 10); + assert_eq!(cache.get(&1), Some(10)); + cache.insert(3, 3); + assert_eq!(cache.get(&2), None); + cache.insert(2, 4); + assert_eq!(cache.get(&2), Some(4)); + assert_eq!(cache.get(&3), Some(3)); + } + + #[test] + fn test_lru_cache_evicted() { + let mut cache = LruCache::new(2); + + assert!(cache.insert(1, 1).is_none()); + assert!(cache.insert(2, 2).is_none()); + let evicted = cache.insert(3, 3).expect("expected an eviction"); + assert_eq!(evicted, (1, 1)); + } + + #[test] + fn test_lru_cache_flush() { + let mut cache = LruCache::new(2); + + cache.insert(1, 1); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .unwrap(); + + assert_eq!(flushed, vec![(1, 1)]); + + cache.insert(1, 3); + cache.insert(2, 2); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .unwrap(); + + assert_eq!(flushed, vec![(2, 2), (1, 3)]); + } +} diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 46158d2f4f4..cdc6d14e703 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -21,6 +21,7 @@ pub mod macros; pub mod chunked_encoding; pub mod db; pub mod hash; +pub mod lru_cache; pub mod pair; pub mod pipe; pub mod retry; diff --git 
a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 0e70321784c..b8a7a2e1b22 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -4852,7 +4852,7 @@ fn mempool_walk_test_users_10_rounds_3_cache_size_2000_null_prob_100() { fn paramaterized_mempool_walk_test( num_users: usize, num_rounds: usize, - nonce_and_candidate_cache_size: u64, + nonce_and_candidate_cache_size: usize, consider_no_estimate_tx_prob: u8, timeout_ms: u128, ) { diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 937c90ebdc8..5382fb482ae 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2120,8 +2120,8 @@ pub struct MinerConfig { /// Wait for a downloader pass before mining. /// This can only be disabled in testing; it can't be changed in the config file. pub wait_for_block_download: bool, - pub nonce_cache_size: u64, - pub candidate_retry_cache_size: u64, + pub nonce_cache_size: usize, + pub candidate_retry_cache_size: usize, pub unprocessed_block_deadline_secs: u64, pub mining_key: Option, /// Amount of time while mining in nakamoto to wait in between mining interim blocks @@ -2599,8 +2599,8 @@ pub struct MinerConfigFile { pub probability_pick_no_estimate_tx: Option, pub block_reward_recipient: Option, pub segwit: Option, - pub nonce_cache_size: Option, - pub candidate_retry_cache_size: Option, + pub nonce_cache_size: Option, + pub candidate_retry_cache_size: Option, pub unprocessed_block_deadline_secs: Option, pub mining_key: Option, pub wait_on_interim_blocks_ms: Option, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index d21f46c3c1e..603b2c4b04c 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -15,7 +15,7 @@ // along with this program. If not, see . use std::cmp::{self, Ordering}; -use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, HashSet, LinkedList, VecDeque}; use std::hash::Hasher; use std::io::{Read, Write}; use std::ops::{Deref, DerefMut}; @@ -55,6 +55,7 @@ use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksMicroblock, StacksTransaction, TransactionPayload, }; use crate::clarity_vm::clarity::ClarityConnection; +use crate::core::nonce_cache::NonceCache; use crate::core::{ ExecutionCost, StacksEpochId, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, }; @@ -524,10 +525,10 @@ pub struct MemPoolWalkSettings { /// either failed to get a cost estimate or has not been estimated yet. pub consider_no_estimate_tx_prob: u8, /// Size of the nonce cache. This avoids MARF look-ups. - pub nonce_cache_size: u64, + pub nonce_cache_size: usize, /// Size of the candidate cache. These are the candidates that will be retried after each /// transaction is mined. - pub candidate_retry_cache_size: u64, + pub candidate_retry_cache_size: usize, /// Types of transactions we'll consider pub txs_to_consider: HashSet, /// Origins for transactions that we'll consider @@ -997,125 +998,6 @@ impl<'a> MemPoolTx<'a> { } } -/// Used to locally cache nonces to avoid repeatedly looking them up in the nonce. -struct NonceCache { - cache: HashMap, - /// The maximum size that this cache can be. 
- max_cache_size: usize, -} - -impl NonceCache { - fn new(nonce_cache_size: u64) -> Self { - let max_size: usize = nonce_cache_size - .try_into() - .expect("Could not cast `nonce_cache_size` as `usize`."); - Self { - cache: HashMap::new(), - max_cache_size: max_size, - } - } - - /// Get a nonce from the cache. - /// First, the RAM cache will be checked for this address. - /// If absent, then the `nonces` table will be queried for this address. - /// If absent, then the MARF will be queried for this address. - /// - /// If not in RAM, the nonce will be opportunistically stored to the `nonces` table. If that - /// fails due to lock contention, then the method will return `true` for its second tuple argument. - /// - /// Returns (nonce, should-try-store-again?) - fn get( - &mut self, - address: &StacksAddress, - clarity_tx: &mut C, - mempool_db: &DBConn, - ) -> (u64, bool) - where - C: ClarityConnection, - { - #[cfg(test)] - assert!(self.cache.len() <= self.max_cache_size); - - // Check in-memory cache - match self.cache.get(address) { - Some(nonce) => (*nonce, false), - None => { - // Check sqlite cache - let opt_nonce = match db_get_nonce(mempool_db, address) { - Ok(opt_nonce) => opt_nonce, - Err(e) => { - warn!("error retrieving nonce from mempool db: {}", e); - None - } - }; - match opt_nonce { - Some(nonce) => { - // Copy this into the in-memory cache if there is space - if self.cache.len() < self.max_cache_size { - self.cache.insert(address.clone(), nonce); - } - (nonce, false) - } - None => { - let nonce = - StacksChainState::get_nonce(clarity_tx, &address.clone().into()); - - let should_store_again = match db_set_nonce(mempool_db, address, nonce) { - Ok(_) => false, - Err(e) => { - debug!("error caching nonce to sqlite: {}", e); - true - } - }; - - if self.cache.len() < self.max_cache_size { - self.cache.insert(address.clone(), nonce); - } - (nonce, should_store_again) - } - } - } - } - } - - /// Store the (address, nonce) pair to the `nonces` table. - /// If storage fails, return false. - /// Otherwise return true. 
- fn update(&mut self, address: StacksAddress, value: u64, mempool_db: &DBConn) -> bool { - // Sqlite cache - let success = match db_set_nonce(mempool_db, &address, value) { - Ok(_) => true, - Err(e) => { - warn!("error caching nonce to sqlite: {}", e); - false - } - }; - - // In-memory cache - if let Some(nonce) = self.cache.get_mut(&address) { - *nonce = value; - } - - success - } -} - -fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> { - let addr_str = address.to_string(); - let nonce_i64 = u64_to_sql(nonce)?; - - let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; - conn.execute(sql, params![addr_str, nonce_i64])?; - Ok(()) -} - -fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, db_error> { - let addr_str = address.to_string(); - - let sql = "SELECT nonce FROM nonces WHERE address = ?"; - query_row(conn, sql, params![addr_str]) -} - #[cfg(test)] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; @@ -1143,14 +1025,11 @@ struct CandidateCache { } impl CandidateCache { - fn new(candidate_retry_cache_size: u64) -> Self { - let max_size: usize = candidate_retry_cache_size - .try_into() - .expect("Could not cast `candidate_retry_cache_size` as usize."); + fn new(candidate_retry_cache_size: usize) -> Self { Self { cache: VecDeque::new(), next: VecDeque::new(), - max_cache_size: max_size, + max_cache_size: candidate_retry_cache_size, } } @@ -1634,10 +1513,6 @@ impl MemPoolDB { let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); - // set of (address, nonce) to store after the inner loop completes. This will be done in a - // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries. - let mut retry_store = HashMap::new(); - let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM mempool @@ -1708,29 +1583,11 @@ impl MemPoolDB { }; // Check the nonces. - let (expected_origin_nonce, retry_store_origin_nonce) = - nonce_cache.get(&candidate.origin_address, clarity_tx, self.conn()); - let (expected_sponsor_nonce, retry_store_sponsor_nonce) = - nonce_cache.get(&candidate.sponsor_address, clarity_tx, self.conn()); - - // Try storing these nonces later if we failed to do so here, e.g. due to some other - // thread holding the write-lock on the mempool DB. 
- if retry_store_origin_nonce { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - candidate.origin_address.clone(), - expected_origin_nonce, - ); - } - if retry_store_sponsor_nonce { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - candidate.sponsor_address.clone(), - expected_sponsor_nonce, - ); - } + let mut nonce_conn = self.reopen(false)?; + let expected_origin_nonce = + nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn); + let expected_sponsor_nonce = + nonce_cache.get(&candidate.sponsor_address, clarity_tx, &mut nonce_conn); match order_nonces( candidate.origin_nonce, @@ -1843,34 +1700,17 @@ impl MemPoolDB { match tx_event { TransactionEvent::Success(_) => { // Bump nonces in the cache for the executed transaction - let stored = nonce_cache.update( + nonce_cache.set( consider.tx.metadata.origin_address, expected_origin_nonce + 1, - self.conn(), + &mut nonce_conn, ); - if !stored { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - consider.tx.metadata.origin_address, - expected_origin_nonce + 1, - ); - } - if consider.tx.tx.auth.is_sponsored() { - let stored = nonce_cache.update( + nonce_cache.set( consider.tx.metadata.sponsor_address, expected_sponsor_nonce + 1, - self.conn(), + &mut nonce_conn, ); - if !stored { - Self::save_nonce_for_retry( - &mut retry_store, - settings.nonce_cache_size, - consider.tx.metadata.sponsor_address, - expected_sponsor_nonce + 1, - ); - } } output_events.push(tx_event); } @@ -1904,13 +1744,8 @@ impl MemPoolDB { drop(query_stmt_null); drop(query_stmt_fee); - if !retry_store.is_empty() { - let tx = self.tx_begin()?; - for (address, nonce) in retry_store.into_iter() { - nonce_cache.update(address, nonce, &tx); - } - tx.commit()?; - } + // Write through the nonce cache to the database + nonce_cache.flush(&mut self.db); debug!( "Mempool iteration finished"; diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 899f9d4a2fa..10aaece8ccb 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -30,6 +30,7 @@ use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, Error as burnchain_error}; use crate::chainstate::burn::ConsensusHash; pub mod mempool; +pub mod nonce_cache; #[cfg(test)] pub mod tests; diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs new file mode 100644 index 00000000000..54c9acb5635 --- /dev/null +++ b/stackslib/src/core/nonce_cache.rs @@ -0,0 +1,253 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+
+use std::collections::HashMap;
+use std::thread;
+use std::time::Duration;
+
+use clarity::types::chainstate::StacksAddress;
+use clarity::util::lru_cache::LruCache;
+use clarity::vm::clarity::ClarityConnection;
+use rand::Rng;
+use rusqlite::params;
+
+use super::mempool::MemPoolTx;
+use super::MemPoolDB;
+use crate::chainstate::stacks::db::StacksChainState;
+use crate::util_lib::db::{query_row, u64_to_sql, DBConn, Error as db_error};
+
+/// Used to cache nonces in memory and in the mempool database.
+/// 1. MARF - source of truth for nonces
+/// 2. Nonce DB - table in mempool sqlite database
+/// 3. HashMap - in-memory cache for nonces
+/// The in-memory cache is restricted to a maximum size to avoid memory
+/// exhaustion. When the cache is full, it should be flushed to the database
+/// and cleared. It is recommended to do this in between batches of candidate
+/// transactions from the mempool.
+pub struct NonceCache {
+    /// In-memory LRU cache of nonces.
+    cache: LruCache<StacksAddress, u64>,
+}
+
+impl NonceCache {
+    pub fn new(max_size: usize) -> Self {
+        Self {
+            cache: LruCache::new(max_size),
+        }
+    }
+
+    /// Get a nonce.
+    /// First, the RAM cache will be checked for this address.
+    /// If absent, then the `nonces` table will be queried for this address.
+    /// If absent, then the MARF will be queried for this address.
+    ///
+    /// On a cache miss, the nonce is inserted into the in-memory cache; if
+    /// that insertion evicts a dirty entry, the cache is flushed to the
+    /// `nonces` table before returning.
+    pub fn get<C>(
+        &mut self,
+        address: &StacksAddress,
+        clarity_tx: &mut C,
+        mempool_db: &mut DBConn,
+    ) -> u64
+    where
+        C: ClarityConnection,
+    {
+        // Check in-memory cache
+        match self.cache.get(address) {
+            Some(nonce) => nonce,
+            None => {
+                // Check sqlite cache
+                let opt_nonce = match db_get_nonce(mempool_db, address) {
+                    Ok(opt_nonce) => opt_nonce,
+                    Err(e) => {
+                        warn!("error retrieving nonce from mempool db: {}", e);
+                        None
+                    }
+                };
+                match opt_nonce {
+                    Some(nonce) => {
+                        // Insert into in-memory cache, but it is not dirty,
+                        // since we just got it from the database.
+                        let evicted = self.cache.insert_clean(address.clone(), nonce);
+                        if evicted.is_some() {
+                            // If we evicted something, we need to flush the cache.
+                            self.flush_with_evicted(mempool_db, evicted);
+                        }
+                        nonce
+                    }
+                    None => {
+                        let nonce =
+                            StacksChainState::get_nonce(clarity_tx, &address.clone().into());
+
+                        self.set(address.clone(), nonce, mempool_db);
+                        nonce
+                    }
+                }
+            }
+        }
+    }
+
+    /// Store the (address, nonce) pair in the in-memory cache, marking it
+    /// dirty. If this insertion evicts another dirty entry, the cache is
+    /// flushed to the `nonces` table.
+    pub fn set(&mut self, address: StacksAddress, value: u64, conn: &mut DBConn) {
+        let evicted = self.cache.insert(address.clone(), value);
+        if evicted.is_some() {
+            // If we evicted something, we need to flush the cache.
+            self.flush_with_evicted(conn, evicted);
+        }
+    }
+
+    pub fn flush_with_evicted(&mut self, conn: &mut DBConn, evicted: Option<(StacksAddress, u64)>) {
+        const MAX_BACKOFF: Duration = Duration::from_secs(30);
+        let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200));
+
+        loop {
+            let result = self.try_flush_with_evicted(conn, evicted);
+
+            match result {
+                Ok(_) => return, // Success: exit the loop
+                Err(e) => {
+                    // Calculate a backoff duration
+                    warn!("Nonce cache flush failed: {e}. 
Retrying in {backoff:?}"); + + // Sleep for the backoff duration + thread::sleep(backoff); + + if backoff < MAX_BACKOFF { + // Exponential backoff + backoff = backoff * 2 + + Duration::from_millis(rand::thread_rng().gen_range(50..200)); + } + } + } + } + } + + pub fn try_flush_with_evicted( + &mut self, + conn: &mut DBConn, + evicted: Option<(StacksAddress, u64)>, + ) -> Result<(), db_error> { + // Flush the cache to the database + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + + let tx = conn.transaction()?; + + if let Some((addr, nonce)) = evicted { + tx.execute(sql, params![addr, nonce])?; + } + + self.cache.flush(|addr, nonce| { + tx.execute(sql, params![addr, nonce])?; + Ok::<(), db_error>(()) + })?; + + tx.commit()?; + + Ok(()) + } + + pub fn flush(&mut self, conn: &mut DBConn) { + self.flush_with_evicted(conn, None) + } +} + +fn db_set_nonce(conn: &DBConn, address: &StacksAddress, nonce: u64) -> Result<(), db_error> { + let addr_str = address.to_string(); + let nonce_i64 = u64_to_sql(nonce)?; + + let sql = "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)"; + conn.execute(sql, params![addr_str, nonce_i64])?; + Ok(()) +} + +fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, db_error> { + let addr_str = address.to_string(); + + let sql = "SELECT nonce FROM nonces WHERE address = ?"; + query_row(conn, sql, params![addr_str]) +} + +#[cfg(test)] +mod tests { + use clarity::consts::CHAIN_ID_TESTNET; + use clarity::types::chainstate::StacksBlockId; + use clarity::types::Address; + use clarity::vm::tests::{TEST_BURN_STATE_DB, TEST_HEADER_DB}; + + use super::*; + use crate::chainstate::stacks::db::test::{chainstate_path, instantiate_chainstate}; + use crate::chainstate::stacks::index::ClarityMarfTrieId; + use crate::clarity_vm::clarity::ClarityInstance; + use crate::clarity_vm::database::marf::MarfedKV; + + #[test] + fn test_nonce_cache() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + let marf = MarfedKV::temporary(); + let mut clarity_instance = ClarityInstance::new(false, CHAIN_ID_TESTNET, marf); + clarity_instance + .begin_test_genesis_block( + &StacksBlockId::sentinel(), + &StacksBlockId([0 as u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ) + .commit_block(); + let mut clarity_conn = clarity_instance.begin_block( + &StacksBlockId([0 as u8; 32]), + &StacksBlockId([1 as u8; 32]), + &TEST_HEADER_DB, + &TEST_BURN_STATE_DB, + ); + + clarity_conn.as_transaction(|clarity_tx| { + assert_eq!(cache.get(&addr1, clarity_tx, conn), 1); + assert_eq!(cache.get(&addr2, clarity_tx, conn), 2); + // addr3 is not in the cache, so it should be fetched from the + // clarity instance (and get 0) + assert_eq!(cache.get(&addr3, clarity_tx, conn), 0); + }); + } + + #[test] + fn test_db_set_nonce() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = 
diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 7155cf5966b..81778540ed3 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -439,6 +439,13 @@ impl BlockMinerThread {
             ))
         })?;
 
+        // Reset the nonce cache, since it is only updated while mining
+        let mut mem_pool = self
+            .config
+            .connect_mempool_db()
+            .expect("Database failure opening mempool");
+        mem_pool.reset_nonce_cache()?;
+
         // now, actually run this tenure
         loop {
             if let Err(e) = self.miner_main_loop(

From e7eb1cca53dc85ee9ace7eff85b53f456f0d130e Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 19 Dec 2024 20:18:47 -0500
Subject: [PATCH 043/238] chore: remove debug print

---
 stacks-common/src/util/lru_cache.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 97b55e69bc1..41f55613e21 100644
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -167,7 +167,6 @@ impl LruCache {
     pub fn flush<E>(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> {
         let mut index = self.head;
         while index != self.capacity {
-            println!("checking {index}, dirty? {}", self.order[index].dirty);
             let next = self.order[index].next;
             if self.order[index].dirty {
                 let value = self.order[index].value;

From 9eb392104a980b293c56837e542b95b6dd0f3fe8 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 20 Dec 2024 15:00:40 -0500
Subject: [PATCH 044/238] chore: small changes from code review

---
 stacks-common/src/util/lru_cache.rs | 2 +-
 stackslib/src/core/nonce_cache.rs   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 41f55613e21..2f939116215 100644
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::HashMap; +use hashbrown::HashMap; /// Node in the doubly linked list struct Node { diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs index 54c9acb5635..80a8e3ffc75 100644 --- a/stackslib/src/core/nonce_cache.rs +++ b/stackslib/src/core/nonce_cache.rs @@ -219,7 +219,7 @@ mod tests { clarity_instance .begin_test_genesis_block( &StacksBlockId::sentinel(), - &StacksBlockId([0 as u8; 32]), + &StacksBlockId([0u8; 32]), &TEST_HEADER_DB, &TEST_BURN_STATE_DB, ) From 5269ed5b51fdd3eab57f0164297035c25887f10a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 3 Jan 2025 15:24:11 -0500 Subject: [PATCH 045/238] chore: fix clippy warning --- stacks-common/src/util/lru_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 2f939116215..58c694103b4 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -140,7 +140,7 @@ impl LruCache { let node = Node { key: key.clone(), value, - dirty: dirty, + dirty, next: self.head, prev: self.capacity, }; From 725152474ef9cb0b301b80061b5885fbe2e33abf Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 4 Mar 2025 14:57:10 +0100 Subject: [PATCH 046/238] refactored max_execution_time api --- Cargo.lock | 1436 ++++++++++++--------- clarity/Cargo.toml | 2 - clarity/src/vm/contexts.rs | 8 + clarity/src/vm/mod.rs | 66 +- clarity/src/vm/tests/simple_apply_eval.rs | 16 +- 5 files changed, 921 insertions(+), 607 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0cb2ebfc1e5..a6728bf314a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "adler32" @@ -79,30 +79,30 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -121,57 +121,59 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.18" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "once_cell", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "ascii" @@ -223,13 +225,12 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 5.0.0", - "event-listener-strategy 0.5.0", + "event-listener-strategy", "futures-core", "pin-project-lite", ] @@ -240,21 +241,20 @@ version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c2886ab563af5038f79ec016dd7b87947ed138b794e8dd64992962c9cca0411" dependencies = [ - "async-lock 3.3.0", + "async-lock 3.4.0", "futures-io", ] [[package]] name = "async-executor" -version = "1.8.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ - "async-lock 3.3.0", "async-task", "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.2.0", + "fastrand 2.3.0", + "futures-lite 2.6.0", "slab", ] @@ -264,12 +264,12 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.2.0", + "async-channel 2.3.1", "async-executor", - 
"async-io 2.3.1", - "async-lock 3.3.0", + "async-io 2.4.0", + "async-lock 3.4.0", "blocking", - "futures-lite 2.2.0", + "futures-lite 2.6.0", "once_cell", ] @@ -304,7 +304,7 @@ dependencies = [ "log", "parking", "polling 2.8.0", - "rustix 0.37.27", + "rustix 0.37.28", "slab", "socket2 0.4.10", "waker-fn", @@ -312,21 +312,21 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ - "async-lock 3.3.0", + "async-lock 3.4.0", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.2.0", + "futures-lite 2.6.0", "parking", - "polling 3.4.0", - "rustix 0.38.31", + "polling 3.7.4", + "rustix 0.38.44", "slab", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -340,31 +340,31 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy 0.4.0", + "event-listener 5.4.0", + "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io 2.4.0", + "async-lock 3.4.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite 2.6.0", "gloo-timers", "kv-log-macro", "log", @@ -378,9 +378,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.7.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "atomic-waker" @@ -388,22 +388,11 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi 0.3.9", -] - [[package]] name = "autocfg" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backoff" @@ -411,24 +400,24 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", "instant", "rand 0.8.5", ] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.74" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -469,9 +458,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "block-buffer" @@ -493,25 +482,22 @@ dependencies = [ [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.2.0", - "async-lock 3.3.0", + "async-channel 2.3.1", "async-task", - "fastrand 2.0.1", "futures-io", - "futures-lite 2.2.0", + "futures-lite 2.6.0", "piper", - "tracing", ] [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "byteorder" @@ -521,17 +507,17 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "cc" -version = "1.0.83" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ - "libc", + "shlex", ] [[package]] @@ -548,16 +534,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-link", ] [[package]] @@ -577,9 +563,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.0" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -587,9 +573,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.0" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" 
+checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstream", "anstyle", @@ -599,21 +585,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clarity" @@ -634,34 +620,31 @@ dependencies = [ "serde_derive", "serde_json", "serde_stacker", - "serial_test", "slog", "stacks-common", - "stackslib", "time 0.2.27", ] [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "colored" -version = "2.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" dependencies = [ - "lazy_static", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -674,9 +657,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_fn" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" +checksum = "2f8a2ca5ac02d09563609681103aada9e1777d54fc57a5acd7a41404f9c93b6e" [[package]] name = "cookie" @@ -707,15 +690,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -728,18 +711,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = 
"crossbeam-utils" -version = "0.8.19" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crypto-common" @@ -795,7 +778,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -808,20 +791,20 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -891,6 +874,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -919,27 +913,27 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -950,20 +944,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.0.0" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72557800024fabbaa2449dd4bf24e37b93702d457a4d4f2b0dd1f0f039f20c1" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -972,21 
+955,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.0.0", + "event-listener 5.4.0", "pin-project-lite", ] @@ -1025,15 +998,15 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "fnv" @@ -1074,9 +1047,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1105,9 +1078,9 @@ checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1137,11 +1110,11 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.2.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ - "fastrand 2.0.1", + "fastrand 2.3.0", "futures-core", "futures-io", "parking", @@ -1156,7 +1129,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] @@ -1173,9 +1146,9 @@ checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -1227,15 +1200,27 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "ghash" version = "0.3.1" @@ -1248,15 +1233,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -1275,7 +1260,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.11", + "http 0.2.12", "indexmap", "slab", "tokio", @@ -1285,16 +1270,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.1.0", + "http 1.2.0", "indexmap", "slab", "tokio", @@ -1304,9 +1289,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", ] @@ -1329,7 +1314,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -1341,7 +1326,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http 0.2.11", + "http 0.2.12", "httpdate", "mime", "sha1 0.10.6", @@ -1353,29 +1338,26 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.11", + "http 0.2.12", ] [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.3.6" +version = 
"0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hkdf" @@ -1399,9 +1381,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1410,9 +1392,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -1426,7 +1408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -1437,7 +1419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -1448,7 +1430,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -1477,9 +1459,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1489,22 +1471,22 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -1513,15 +1495,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.6", - "http 1.1.0", + "h2 0.4.8", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -1538,8 +1520,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.11", - "hyper 0.14.28", + "http 0.2.12", + "hyper 0.14.32", "rustls", "tokio", "tokio-rustls", @@ -1547,24 +1529,24 @@ dependencies = [ [[package]] 
name = "hyper-util" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", - "hyper 1.4.1", + "hyper 1.6.0", "pin-project-lite", "tokio", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1583,21 +1565,150 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = 
"icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -1611,9 +1722,9 @@ checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if 1.0.0", ] @@ -1633,7 +1744,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.6", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -1649,22 +1760,40 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "is-terminal" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +dependencies = [ + "hermit-abi 0.4.0", + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1698,15 +1827,15 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libflate" @@ -1730,13 +1859,12 @@ dependencies = [ [[package]] name = "libredox" -version = "0.0.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.0", "libc", - "redox_syscall 0.4.1", ] [[package]] @@ -1799,9 +1927,15 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "litemap" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -1815,9 +1949,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" dependencies = [ "value-bag", ] @@ -1833,9 +1967,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memoffset" @@ -1854,9 +1988,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -1864,11 +1998,11 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ - "adler", + "adler2", ] [[package]] @@ -1892,13 +2026,13 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + 
"windows-sys 0.52.0", ] [[package]] @@ -1915,21 +2049,21 @@ dependencies = [ [[package]] name = "mockito" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" +checksum = "7760e0e418d9b7e5777c0374009ca4c93861b9066f18cb334a20ce50ab63aa48" dependencies = [ "assert-json-diff 2.0.2", "bytes", "colored", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.4.1", + "hyper 1.6.0", "hyper-util", "log", - "rand 0.8.5", + "rand 0.9.0", "regex", "serde_json", "serde_urlencoded", @@ -1946,7 +2080,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "memchr", @@ -2003,52 +2137,33 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi 0.3.6", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "object" -version = "0.32.2" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "overload" @@ -2058,9 +2173,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -2080,9 +2195,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -2099,29 +2214,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2131,12 +2246,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.0.1", + "fastrand 2.3.0", "futures-io", ] @@ -2152,9 +2267,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "polling" @@ -2174,16 +2289,17 @@ dependencies = [ [[package]] name = "polling" -version = "3.4.0" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if 1.0.0", "concurrent-queue", + "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.31", + "rustix 0.38.44", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2215,9 +2331,12 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy 0.7.35", +] [[package]] name = "proc-macro-error" @@ -2251,9 +2370,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -2280,18 +2399,18 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psm" -version = "0.1.21" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" +checksum = 
"f58e5423e24c18cc840e1c98370b3993c6649cd1678b4d24318bcf0a083cbe88" dependencies = [ "cc", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" dependencies = [ "proc-macro2", ] @@ -2320,6 +2439,17 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy 0.8.21", +] + [[package]] name = "rand_chacha" version = "0.2.2" @@ -2340,6 +2470,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + [[package]] name = "rand_core" version = "0.5.1" @@ -2355,57 +2495,57 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "rand_core" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "rand_core 0.5.1", + "getrandom 0.3.1", ] [[package]] -name = "redox_syscall" -version = "0.4.1" +name = "rand_hc" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "bitflags 1.3.2", + "rand_core 0.5.1", ] [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.0", ] [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.12", + "getrandom 0.2.15", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -2419,13 +2559,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] @@ -2436,9 +2576,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "relay-server" @@ -2449,9 +2589,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", @@ -2459,9 +2599,9 @@ dependencies = [ "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.32", "hyper-rustls", "ipnet", "js-sys", @@ -2490,16 +2630,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", - "getrandom 0.2.12", + "cfg-if 1.0.0", + "getrandom 0.2.15", "libc", - "spin 0.9.8", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2535,7 +2675,7 @@ dependencies = [ "futures", "futures-timer", "rstest_macros", - "rustc_version 0.4.0", + "rustc_version 0.4.1", ] [[package]] @@ -2547,7 +2687,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "syn 1.0.109", "unicode-ident", ] @@ -2560,7 +2700,7 @@ checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073" dependencies = [ "quote", "rand 0.8.5", - "rustc_version 0.4.0", + "rustc_version 0.4.1", "syn 1.0.109", ] @@ -2570,7 +2710,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2581,9 +2721,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc_version" @@ -2596,18 +2736,18 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.21", + "semver 1.0.26", ] [[package]] name = "rustix" -version = "0.37.27" +version = "0.37.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" dependencies = [ "bitflags 1.3.2", "errno", @@ -2619,15 +2759,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys 0.4.13", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", ] [[package]] @@ -2663,21 +2803,21 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "scc" -version = "2.3.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28e1c91382686d21b5ac7959341fcb9780fa7c03773646995a87c950fa7be640" +checksum = "ea091f6cac2595aa38993f04f4ee692ed43757035c36e67c180b6828356385b1" dependencies = [ "sdd", ] @@ -2706,9 +2846,9 @@ dependencies = [ [[package]] name = "sdd" -version = "3.0.5" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9" +checksum = "b07779b9b918cc05650cb30f404d4d7835d26df37c235eded8a6832e2fb82cca" [[package]] name = "secp256k1" @@ -2740,9 +2880,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "semver-parser" @@ -2752,31 +2892,32 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -2794,9 +2935,9 @@ 
dependencies = [ [[package]] name = "serde_stacker" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "babfccff5773ff80657f0ecf553c7c516bdc2eb16389c0918b36b73e7015276e" +checksum = "69c8defe6c780725cce4ec6ad3bd91e321baf6fa4e255df1f31e345d507ef01a" dependencies = [ "serde", "stacker", @@ -2836,7 +2977,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] @@ -2861,9 +3002,9 @@ dependencies = [ [[package]] name = "sha1_smol" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" [[package]] name = "sha2" @@ -2892,9 +3033,9 @@ dependencies = [ [[package]] name = "sha2-asm" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27ba7066011e3fb30d808b51affff34f0a66d3a03a58edd787c6e420e40e44e" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" dependencies = [ "cc", ] @@ -2918,6 +3059,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signature" version = "2.2.0" @@ -2929,9 +3076,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] name = "siphasher" @@ -2963,27 +3110,27 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.36", + "time 0.3.37", ] [[package]] name = "slog-term" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" +checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" dependencies = [ - "atty", + "is-terminal", "slog", "term", "thread_local", - "time 0.3.36", + "time 0.3.37", ] [[package]] name = "smallvec" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" [[package]] name = "socket2" @@ -2997,12 +3144,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3027,17 +3174,23 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "stacker" -version = "0.1.15" +version = "0.1.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" +checksum = "d9156ebd5870ef293bfb43f91c7a74528d363ec0d424afe24160ed5a4343d08a" dependencies = [ "cc", "cfg-if 1.0.0", "libc", "psm", - "winapi 0.3.9", + "windows-sys 0.59.0", ] [[package]] @@ -3208,9 +3361,9 @@ dependencies = [ [[package]] name = "stdext" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6012f6ef4d674ce7021a8b0f5093f7e339f54d4ba04fc1f9c901659459b4f35b" +checksum = "4af28eeb7c18ac2dbdb255d40bee63f203120e1db6b0024b177746ebec7049c1" [[package]] name = "stdweb" @@ -3263,9 +3416,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "stx-genesis" @@ -3277,9 +3430,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -3294,9 +3447,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" dependencies = [ "proc-macro2", "quote", @@ -3309,6 +3462,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3332,15 +3496,16 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.11.0" +version = "3.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" +checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.0.1", + "fastrand 2.3.0", + "getrandom 0.3.1", "once_cell", - "rustix 0.38.31", - "windows-sys 0.52.0", + "rustix 0.38.44", + "windows-sys 0.59.0", ] [[package]] @@ -3356,29 +3521,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -3421,19 +3586,17 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", - "libc", "num-conv", - "num_threads", "powerfmt", "serde", "time-core", - "time-macros 0.2.18", + "time-macros 0.2.19", ] [[package]] @@ -3454,9 +3617,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -3488,35 +3651,29 @@ dependencies = [ ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "tinystr" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" dependencies = [ - "tinyvec_macros", + "displaydoc", + "zerovec", ] -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - [[package]] name = "tokio" -version = "1.36.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", "libc", - "mio 0.8.11", - "num_cpus", + "mio 1.0.3", "parking_lot", "pin-project-lite", - "socket2 0.5.5", - "windows-sys 0.48.0", + "socket2 0.5.8", + "windows-sys 0.52.0", ] [[package]] @@ -3529,22 +3686,11 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-stream" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-tungstenite" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ "futures-util", "log", @@ -3554,16 +3700,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] 
@@ -3577,15 +3722,15 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -3595,20 +3740,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3627,9 +3772,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -3651,14 +3796,14 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.20.1" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" dependencies = [ "byteorder", "bytes", "data-encoding", - "http 0.2.11", + "http 1.2.0", "httparse", "log", "rand 0.8.5", @@ -3670,39 +3815,21 @@ dependencies = [ [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unicase" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.15" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "unicode-normalization" -version = "0.1.22" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "universal-hash" @@ -3722,9 +3849,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -3738,23 +3865,35 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -3764,15 +3903,15 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "waker-fn" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "want" @@ -3785,29 +3924,27 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" dependencies = [ "bytes", "futures-channel", "futures-util", "headers", - "http 0.2.11", - "hyper 0.14.28", + "http 0.2.12", + "hyper 0.14.32", "log", "mime", "mime_guess", "multer", "percent-encoding", "pin-project", - "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", - 
"tokio-stream", "tokio-tungstenite", "tokio-util", "tower-service", @@ -3826,48 +3963,59 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if 1.0.0", + "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if 1.0.0", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3875,28 +4023,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -3948,9 +4099,15 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" 
+version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" + [[package]] name = "windows-sys" version = "0.48.0" @@ -3966,7 +4123,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -3986,17 +4152,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4007,9 +4174,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4019,9 +4186,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4031,9 +4198,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4043,9 +4216,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name 
= "windows_x86_64_gnu" @@ -4055,9 +4228,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4067,9 +4240,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4079,9 +4252,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winreg" @@ -4093,6 +4266,27 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.9.0", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -4103,28 +4297,116 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive 0.7.35", +] + [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.8.21", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.99", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", + "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index ec515ce9628..284e856e498 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -47,8 +47,6 @@ mutants = "0.0.3" # a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling # but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. 
# criterion = "0.3" -stacks = { package = "stackslib", path = "../stackslib", features = ["default", "testing"] } -serial_test = "3.2.0" [features] default = ["canonical"] diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 7ca3380c012..d8bf31fdaee 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -48,6 +48,8 @@ use crate::vm::types::{ use crate::vm::version::ClarityVersion; use crate::vm::{ast, eval, is_reserved, stx_transfer_consolidated}; +use std::time::Duration; + pub const MAX_CONTEXT_DEPTH: u16 = 256; // TODO: @@ -201,6 +203,7 @@ pub struct GlobalContext<'a, 'hooks> { pub chain_id: u32, pub eval_hooks: Option>, pub execution_time_tracker: Instant, + pub max_execution_time: Option, } #[derive(Serialize, Deserialize, Clone)] @@ -1551,6 +1554,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { chain_id, eval_hooks: None, execution_time_tracker: Instant::now(), + max_execution_time: None, } } @@ -1558,6 +1562,10 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { self.asset_maps.is_empty() } + pub fn set_max_execution_time(&mut self, max_execution_time: Option) { + self.max_execution_time = max_execution_time + } + fn get_asset_map(&mut self) -> Result<&mut AssetMap> { self.asset_maps .last_mut() diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 8cecaae84a7..3872906f590 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -54,13 +54,9 @@ pub mod test_util; pub mod clarity; use std::collections::BTreeMap; -#[cfg(test)] -use std::sync::LazyLock; use std::time::Duration; use serde_json; -#[cfg(test)] -use stacks::util::tests::TestFlag; use stacks_common::types::StacksEpochId; use self::analysis::ContractAnalysis; @@ -91,11 +87,6 @@ use crate::vm::types::{PrincipalData, TypeSignature}; pub use crate::vm::version::ClarityVersion; pub const MAX_CALL_STACK_DEPTH: usize = 64; -pub const MAX_EXECUTION_TIME_SECS: u64 = 30; - -#[cfg(test)] -static TEST_MAX_EXECUTION_TIME: LazyLock> = - LazyLock::new(|| TestFlag::new(Duration::from_secs(MAX_EXECUTION_TIME_SECS))); #[derive(Debug, Clone)] pub struct ParsedContract { @@ -313,14 +304,11 @@ pub fn apply( } } -#[cfg(not(test))] -fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { - global_context.execution_time_tracker.elapsed() > Duration::from_secs(MAX_EXECUTION_TIME_SECS) -} - -#[cfg(test)] fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { - global_context.execution_time_tracker.elapsed() > TEST_MAX_EXECUTION_TIME.get() + if let Some(max_execution_time) = global_context.max_execution_time { + return global_context.execution_time_tracker.elapsed() >= max_execution_time; + } + false } pub fn eval( @@ -531,13 +519,17 @@ pub fn execute_on_network(program: &str, use_mainnet: bool) -> Result( program: &str, clarity_version: ClarityVersion, epoch: StacksEpochId, ast_rules: ast::ASTRules, use_mainnet: bool, -) -> Result> { + mut global_context_function: F, +) -> Result> +where + F: FnMut(&mut GlobalContext) -> Result<()>, +{ use crate::vm::database::MemoryBackingStore; use crate::vm::tests::test_only_mainnet_to_chain_id; use crate::vm::types::QualifiedContractIdentifier; @@ -555,6 +547,7 @@ pub fn execute_with_parameters( epoch, ); global_context.execute(|g| { + global_context_function(g)?; let parsed = ast::build_ast_with_rules( &contract_id, program, @@ -568,6 +561,24 @@ pub fn execute_with_parameters( }) } +#[cfg(any(test, feature = "testing"))] +pub fn execute_with_parameters( + program: &str, + clarity_version: ClarityVersion, + 
epoch: StacksEpochId, + ast_rules: ast::ASTRules, + use_mainnet: bool, +) -> Result> { + execute_with_parameters_and_call_in_global_context( + program, + clarity_version, + epoch, + ast_rules, + use_mainnet, + |_| Ok(()), + ) +} + /// Execute for test with `version`, Epoch20, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_against_version(program: &str, version: ClarityVersion) -> Result> { @@ -592,6 +603,25 @@ pub fn execute(program: &str) -> Result> { ) } +/// Execute for test in Clarity1, Epoch20, testnet. +#[cfg(any(test, feature = "testing"))] +pub fn execute_with_max_execution_time( + program: &str, + max_execution_time: Duration, +) -> Result> { + execute_with_parameters_and_call_in_global_context( + program, + ClarityVersion::Clarity1, + StacksEpochId::Epoch20, + ast::ASTRules::PrecheckSize, + false, + |g| { + g.set_max_execution_time(Some(max_execution_time)); + Ok(()) + }, + ) +} + /// Execute for test in Clarity2, Epoch21, testnet. #[cfg(any(test, feature = "testing"))] pub fn execute_v2(program: &str) -> Result> { diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index e31874d3932..18d9e287614 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -18,7 +18,6 @@ use std::time::Duration; use rstest::rstest; use rstest_reuse::{self, *}; -use serial_test::serial; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; @@ -40,9 +39,9 @@ use crate::vm::types::{ StacksAddressExtensions, TypeSignature, }; use crate::vm::{ - eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_parameters, CallStack, - ClarityVersion, ContractContext, Environment, GlobalContext, LocalContext, Value, - MAX_EXECUTION_TIME_SECS, TEST_MAX_EXECUTION_TIME, + eval, execute as vm_execute, execute_v2 as vm_execute_v2, + execute_with_max_execution_time as vm_execute_with_max_execution_time, execute_with_parameters, + CallStack, ClarityVersion, ContractContext, Environment, GlobalContext, LocalContext, Value, }; #[test] @@ -1769,14 +1768,11 @@ fn test_chain_id() { } #[test] -#[serial] fn test_execution_time_expiration() { - TEST_MAX_EXECUTION_TIME.set(Duration::from_secs(0)); - assert_eq!( - vm_execute("(+ 1 1)").err().unwrap(), + vm_execute_with_max_execution_time("(+ 1 1)", Duration::from_secs(0)) + .err() + .unwrap(), CheckErrors::ExecutionTimeExpired.into() ); - - TEST_MAX_EXECUTION_TIME.set(Duration::from_secs(MAX_EXECUTION_TIME_SECS)); } From 9bdaaaac3b53fe405797509b0a333dc49d19e130 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 4 Mar 2025 14:58:47 +0100 Subject: [PATCH 047/238] removed useless new for TestFlag --- stacks-common/src/util/tests.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/stacks-common/src/util/tests.rs b/stacks-common/src/util/tests.rs index c941e444bbe..1b01a449be1 100644 --- a/stacks-common/src/util/tests.rs +++ b/stacks-common/src/util/tests.rs @@ -53,12 +53,6 @@ impl Default for TestFlag { } } -impl TestFlag { - pub fn new(initial_value: T) -> Self { - Self(Arc::new(Mutex::new(Some(initial_value)))) - } -} - impl TestFlag { /// Sets the value of the test flag. 
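
Taken together, the clarity changes and the `TestFlag` cleanup above replace the process-global, test-only execution-time limit with a per-context budget: `GlobalContext` now carries `max_execution_time: Option<Duration>`, `check_max_execution_time_expired` returns `false` whenever no budget is set, and tests opt in through `execute_with_max_execution_time` instead of mutating a shared flag, which is why `#[serial]` and `TestFlag::new` can go (remaining `TestFlag` users construct via `TestFlag::default()` and then `set`). A minimal usage sketch mirroring the rewritten test; the `clarity::vm` paths are assumptions inferred from the `crate::vm` imports in the diff, and the helper is only compiled under `cfg(any(test, feature = "testing"))`:

    use std::time::Duration;

    use clarity::vm::errors::CheckErrors;
    use clarity::vm::execute_with_max_execution_time;

    #[test]
    fn zero_budget_expires_immediately() {
        // A zero budget makes check_max_execution_time_expired() trip on the
        // first apply() step, so evaluation aborts with ExecutionTimeExpired
        // before "(+ 1 1)" is ever reduced.
        let err = execute_with_max_execution_time("(+ 1 1)", Duration::from_secs(0))
            .err()
            .unwrap();
        assert_eq!(err, CheckErrors::ExecutionTimeExpired.into());
    }

Embedders that build a `GlobalContext` by hand would instead call `g.set_max_execution_time(Some(budget))` before evaluating, which is exactly what the helper's closure does; because the budget lives on the context rather than in a shared static, such tests no longer have to run serially.
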
/// From 328821816dff8f78c0be33fd489d3ec651272433 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 4 Mar 2025 14:59:58 +0100 Subject: [PATCH 048/238] restore Cargo.lock --- Cargo.lock | 1434 +++++++++++++++++++++------------------------------- 1 file changed, 575 insertions(+), 859 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6728bf314a..a51010ecdf3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] -name = "adler2" -version = "2.0.0" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "adler32" @@ -79,30 +79,30 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" dependencies = [ "cfg-if 1.0.0", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.21" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "android-tzdata" @@ -121,59 +121,57 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = 
"e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "ascii" @@ -225,12 +223,13 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" dependencies = [ "concurrent-queue", - "event-listener-strategy", + "event-listener 5.0.0", + "event-listener-strategy 0.5.0", "futures-core", "pin-project-lite", ] @@ -241,20 +240,21 @@ version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c2886ab563af5038f79ec016dd7b87947ed138b794e8dd64992962c9cca0411" dependencies = [ - "async-lock 3.4.0", + "async-lock 3.3.0", "futures-io", ] [[package]] name = "async-executor" -version = "1.13.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" dependencies = [ + "async-lock 3.3.0", "async-task", "concurrent-queue", - "fastrand 2.3.0", - "futures-lite 2.6.0", + "fastrand 2.0.1", + "futures-lite 2.2.0", "slab", ] @@ -264,12 +264,12 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.2.0", "async-executor", - "async-io 2.4.0", - "async-lock 3.4.0", + "async-io 2.3.1", + "async-lock 3.3.0", "blocking", - "futures-lite 2.6.0", + "futures-lite 2.2.0", "once_cell", ] @@ -304,7 +304,7 @@ dependencies = [ "log", "parking", "polling 2.8.0", - "rustix 0.37.28", + "rustix 0.37.27", "slab", "socket2 0.4.10", "waker-fn", @@ -312,21 +312,21 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" dependencies = [ - "async-lock 3.4.0", + "async-lock 3.3.0", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.6.0", + "futures-lite 2.2.0", "parking", - "polling 3.7.4", - "rustix 0.38.44", + "polling 3.4.0", + "rustix 0.38.31", "slab", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -340,31 +340,31 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener 5.4.0", - "event-listener-strategy", + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.13.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 2.4.0", - "async-lock 3.4.0", + "async-io 1.13.0", + "async-lock 2.8.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 2.6.0", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -378,9 +378,9 @@ dependencies = [ [[package]] name = "async-task" -version = "4.7.1" +version = "4.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "atomic-waker" @@ -388,11 +388,22 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi 0.3.9", +] + [[package]] name = "autocfg" -version = "1.4.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backoff" @@ -400,24 +411,24 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.12", "instant", "rand 0.8.5", ] [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", + "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", ] [[package]] @@ -458,9 +469,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" [[package]] name = "block-buffer" @@ -482,22 +493,25 @@ dependencies = [ [[package]] name = "blocking" -version = "1.6.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ - "async-channel 
2.3.1", + "async-channel 2.2.0", + "async-lock 3.3.0", "async-task", + "fastrand 2.0.1", "futures-io", - "futures-lite 2.6.0", + "futures-lite 2.2.0", "piper", + "tracing", ] [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" @@ -507,17 +521,17 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cc" -version = "1.2.16" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ - "shlex", + "libc", ] [[package]] @@ -534,16 +548,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-link", + "windows-targets 0.52.0", ] [[package]] @@ -563,9 +577,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" dependencies = [ "clap_builder", "clap_derive", @@ -573,9 +587,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" dependencies = [ "anstream", "anstyle", @@ -585,21 +599,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "clarity" @@ -627,24 +641,25 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = 
"acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "3.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "windows-sys 0.59.0", + "lazy_static", + "windows-sys 0.48.0", ] [[package]] name = "concurrent-queue" -version = "2.5.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" dependencies = [ "crossbeam-utils", ] @@ -657,9 +672,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_fn" -version = "0.4.11" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f8a2ca5ac02d09563609681103aada9e1777d54fc57a5acd7a41404f9c93b6e" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "cookie" @@ -690,15 +705,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.17" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -711,18 +726,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "crossbeam-utils" -version = "0.8.21" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -778,7 +793,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version 0.4.1", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -791,20 +806,20 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "der" -version = "0.7.9" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" 
dependencies = [ "const-oid", "zeroize", @@ -874,17 +889,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] - [[package]] name = "ed25519" version = "2.2.3" @@ -913,27 +917,27 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.35" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "equivalent" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -944,9 +948,20 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "b72557800024fabbaa2449dd4bf24e37b93702d457a4d4f2b0dd1f0f039f20c1" dependencies = [ "concurrent-queue", "parking", @@ -955,11 +970,21 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 5.4.0", + "event-listener 4.0.3", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +dependencies = [ + "event-listener 5.0.0", "pin-project-lite", ] @@ -998,15 +1023,15 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.3.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fiat-crypto" -version = "0.2.9" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" [[package]] name = "fnv" @@ -1047,9 +1072,9 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1078,9 +1103,9 @@ checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1110,11 +1135,11 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" dependencies = [ - "fastrand 2.3.0", + "fastrand 2.0.1", "futures-core", "futures-io", "parking", @@ -1129,7 +1154,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] @@ -1146,9 +1171,9 @@ checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" -version = "3.0.3" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" @@ -1200,27 +1225,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] -[[package]] -name = "getrandom" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", -] - [[package]] name = "ghash" version = "0.3.1" @@ -1233,15 +1246,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "gloo-timers" -version = "0.3.0" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" dependencies = [ "futures-channel", 
"futures-core", @@ -1260,7 +1273,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.12", + "http 0.2.11", "indexmap", "slab", "tokio", @@ -1270,16 +1283,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.2.0", + "http 1.1.0", "indexmap", "slab", "tokio", @@ -1289,9 +1302,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.5" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", ] @@ -1314,7 +1327,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.14.3", ] [[package]] @@ -1326,7 +1339,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http 0.2.12", + "http 0.2.11", "httpdate", "mime", "sha1 0.10.6", @@ -1338,26 +1351,29 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.12", + "http 0.2.11", ] [[package]] name = "heck" -version = "0.5.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] [[package]] name = "hermit-abi" -version = "0.4.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" [[package]] name = "hkdf" @@ -1381,9 +1397,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.12" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -1392,9 +1408,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1408,7 +1424,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.12", + "http 0.2.11", "pin-project-lite", ] @@ -1419,7 
+1435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http 1.1.0", ] [[package]] @@ -1430,7 +1446,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "pin-project-lite", ] @@ -1459,9 +1475,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1471,22 +1487,22 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.32" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.12", + "http 0.2.11", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.8", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1495,15 +1511,15 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.8", - "http 1.2.0", + "h2 0.4.6", + "http 1.1.0", "http-body 1.0.1", "httparse", "httpdate", @@ -1520,8 +1536,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.12", - "hyper 0.14.32", + "http 0.2.11", + "hyper 0.14.28", "rustls", "tokio", "tokio-rustls", @@ -1529,24 +1545,24 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.4.1", "pin-project-lite", "tokio", ] [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1565,150 +1581,21 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] - [[package]] name = "idna" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "indexmap" -version = "2.7.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies 
= [ "equivalent", "hashbrown 0.15.2", @@ -1722,9 +1609,9 @@ checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] name = "instant" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", ] @@ -1744,7 +1631,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.3.6", "libc", "windows-sys 0.48.0", ] @@ -1760,40 +1647,22 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "is-terminal" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" -dependencies = [ - "hermit-abi 0.4.0", - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ - "once_cell", "wasm-bindgen", ] @@ -1827,15 +1696,15 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libflate" @@ -1859,12 +1728,13 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.3" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.4.2", "libc", + "redox_syscall 0.4.1", ] [[package]] @@ -1927,15 +1797,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "litemap" -version = "0.7.5" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -1949,9 +1813,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ "value-bag", ] @@ -1967,9 +1831,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -1988,9 +1852,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.5" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -1998,11 +1862,11 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ - "adler2", + "adler", ] [[package]] @@ -2026,13 +1890,13 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -2049,21 +1913,21 @@ dependencies = [ [[package]] name = "mockito" -version = "1.7.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7760e0e418d9b7e5777c0374009ca4c93861b9066f18cb334a20ce50ab63aa48" +checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" dependencies = [ "assert-json-diff 2.0.2", "bytes", "colored", "futures-util", - "http 1.2.0", + "http 1.1.0", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.4.1", "hyper-util", "log", - "rand 0.9.0", + "rand 0.8.5", "regex", "serde_json", "serde_urlencoded", @@ -2080,7 +1944,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 0.2.12", + "http 0.2.11", "httparse", "log", "memchr", @@ -2137,33 +2001,52 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ 
"autocfg", ] +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.6", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "object" -version = "0.36.7" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.3" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "overload" @@ -2173,9 +2056,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" -version = "2.2.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -2195,9 +2078,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall", + "redox_syscall 0.5.7", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.52.0", ] [[package]] @@ -2214,29 +2097,29 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -2246,12 +2129,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.3.0", + "fastrand 2.0.1", "futures-io", ] @@ -2267,9 +2150,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.32" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "polling" @@ -2289,17 +2172,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" dependencies = [ "cfg-if 1.0.0", "concurrent-queue", - "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.44", + "rustix 0.38.31", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2331,12 +2213,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy 0.7.35", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -2370,9 +2249,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ "unicode-ident", ] @@ -2399,18 +2278,18 @@ checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "psm" -version = "0.1.25" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58e5423e24c18cc840e1c98370b3993c6649cd1678b4d24318bcf0a083cbe88" +checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] [[package]] name = "quote" -version = "1.0.39" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -2439,17 +2318,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", - "zerocopy 0.8.21", -] - [[package]] name = "rand_chacha" version = "0.2.2" @@ -2470,16 +2338,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", -] - [[package]] name = "rand_core" version = "0.5.1" @@ -2495,57 +2353,57 @@ version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.12", ] [[package]] -name = "rand_core" -version = "0.9.3" +name = "rand_hc" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "getrandom 0.3.1", + "rand_core 0.5.1", ] [[package]] -name = "rand_hc" -version = "0.2.0" +name = "redox_syscall" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "rand_core 0.5.1", + "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.4.2", ] [[package]] name = "redox_users" -version = "0.4.6" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.12", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata 0.4.5", + "regex-syntax 0.8.2", ] [[package]] @@ -2559,13 +2417,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax 0.8.2", ] [[package]] @@ -2576,9 +2434,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relay-server" @@ -2589,9 +2447,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.27" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64 0.21.7", "bytes", @@ -2599,9 +2457,9 @@ dependencies = [ "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.12", + "http 0.2.11", "http-body 0.4.6", - "hyper 0.14.32", + "hyper 0.14.28", 
"hyper-rustls", "ipnet", "js-sys", @@ -2630,16 +2488,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.11" +version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "cfg-if 1.0.0", - "getrandom 0.2.15", + "getrandom 0.2.12", "libc", + "spin 0.9.8", "untrusted", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -2675,7 +2533,7 @@ dependencies = [ "futures", "futures-timer", "rstest_macros", - "rustc_version 0.4.1", + "rustc_version 0.4.0", ] [[package]] @@ -2687,7 +2545,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "rustc_version 0.4.1", + "rustc_version 0.4.0", "syn 1.0.109", "unicode-ident", ] @@ -2700,7 +2558,7 @@ checksum = "45f80dcc84beab3a327bbe161f77db25f336a1452428176787c8c79ac79d7073" dependencies = [ "quote", "rand 0.8.5", - "rustc_version 0.4.1", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2710,7 +2568,7 @@ version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.4.2", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2721,9 +2579,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" @@ -2736,18 +2594,18 @@ dependencies = [ [[package]] name = "rustc_version" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.26", + "semver 1.0.21", ] [[package]] name = "rustix" -version = "0.37.28" +version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" dependencies = [ "bitflags 1.3.2", "errno", @@ -2759,15 +2617,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.44" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "linux-raw-sys 0.4.13", + "windows-sys 0.52.0", ] [[package]] @@ -2803,21 +2661,21 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum 
= "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "scc" -version = "2.3.3" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea091f6cac2595aa38993f04f4ee692ed43757035c36e67c180b6828356385b1" +checksum = "28e1c91382686d21b5ac7959341fcb9780fa7c03773646995a87c950fa7be640" dependencies = [ "sdd", ] @@ -2846,9 +2704,9 @@ dependencies = [ [[package]] name = "sdd" -version = "3.0.7" +version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07779b9b918cc05650cb30f404d4d7835d26df37c235eded8a6832e2fb82cca" +checksum = "478f121bb72bbf63c52c93011ea1791dca40140dfe13f8336c4c5ac952c33aa9" [[package]] name = "secp256k1" @@ -2880,9 +2738,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" [[package]] name = "semver-parser" @@ -2892,32 +2750,31 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.218" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ "itoa", - "memchr", "ryu", "serde", ] @@ -2935,9 +2792,9 @@ dependencies = [ [[package]] name = "serde_stacker" -version = "0.1.12" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69c8defe6c780725cce4ec6ad3bd91e321baf6fa4e255df1f31e345d507ef01a" +checksum = "babfccff5773ff80657f0ecf553c7c516bdc2eb16389c0918b36b73e7015276e" dependencies = [ "serde", "stacker", @@ -2977,7 +2834,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] @@ -3002,9 +2859,9 @@ dependencies = [ [[package]] name = "sha1_smol" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" @@ -3033,9 +2890,9 @@ dependencies = [ [[package]] name = "sha2-asm" -version = "0.6.4" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +checksum = "f27ba7066011e3fb30d808b51affff34f0a66d3a03a58edd787c6e420e40e44e" dependencies = [ "cc", ] @@ -3059,12 +2916,6 @@ 
dependencies = [ "lazy_static", ] -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - [[package]] name = "signature" version = "2.2.0" @@ -3076,9 +2927,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.7.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "siphasher" @@ -3110,27 +2961,27 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.37", + "time 0.3.36", ] [[package]] name = "slog-term" -version = "2.9.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" +checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" dependencies = [ - "is-terminal", + "atty", "slog", "term", "thread_local", - "time 0.3.37", + "time 0.3.36", ] [[package]] name = "smallvec" -version = "1.14.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" @@ -3144,12 +2995,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -3174,23 +3025,17 @@ dependencies = [ "der", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "stacker" -version = "0.1.19" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9156ebd5870ef293bfb43f91c7a74528d363ec0d424afe24160ed5a4343d08a" +checksum = "c886bd4480155fd3ef527d45e9ac8dd7118a898a46530b7b94c3e21866259fce" dependencies = [ "cc", "cfg-if 1.0.0", "libc", "psm", - "windows-sys 0.59.0", + "winapi 0.3.9", ] [[package]] @@ -3361,9 +3206,9 @@ dependencies = [ [[package]] name = "stdext" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4af28eeb7c18ac2dbdb255d40bee63f203120e1db6b0024b177746ebec7049c1" +checksum = "6012f6ef4d674ce7021a8b0f5093f7e339f54d4ba04fc1f9c901659459b4f35b" [[package]] name = "stdweb" @@ -3416,9 +3261,9 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] name = "strsim" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "stx-genesis" @@ -3430,9 +3275,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -3447,9 +3292,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.99" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -3462,17 +3307,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] - [[package]] name = "system-configuration" version = "0.5.1" @@ -3496,16 +3330,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.17.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" +checksum = "b8fcd239983515c23a32fb82099f97d0b11b8c72f654ed659363a95c3dad7a53" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.3.0", - "getrandom 0.3.1", + "fastrand 2.0.1", "once_cell", - "rustix 0.38.44", - "windows-sys 0.59.0", + "rustix 0.38.31", + "windows-sys 0.52.0", ] [[package]] @@ -3521,29 +3354,29 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.69" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.69" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -3586,17 +3419,19 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", - "time-macros 0.2.19", + "time-macros 0.2.18", ] [[package]] @@ -3617,9 +3452,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", 
"time-core", @@ -3651,29 +3486,35 @@ dependencies = [ ] [[package]] -name = "tinystr" -version = "0.7.6" +name = "tinyvec" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ - "displaydoc", - "zerovec", + "tinyvec_macros", ] +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" -version = "1.43.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.3", + "mio 0.8.11", + "num_cpus", "parking_lot", "pin-project-lite", - "socket2 0.5.8", - "windows-sys 0.52.0", + "socket2 0.5.5", + "windows-sys 0.48.0", ] [[package]] @@ -3686,11 +3527,22 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-tungstenite" -version = "0.21.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", @@ -3700,15 +3552,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", + "tracing", ] [[package]] @@ -3722,15 +3575,15 @@ dependencies = [ [[package]] name = "tower-service" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", @@ -3740,20 +3593,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -3772,9 +3625,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -3796,14 +3649,14 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.21.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.2.0", + "http 0.2.11", "httparse", "log", "rand 0.8.5", @@ -3815,21 +3668,39 @@ dependencies = [ [[package]] name = "typenum" -version = "1.18.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" -version = "2.8.1" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] [[package]] name = "universal-hash" @@ -3849,9 +3720,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", @@ -3865,35 +3736,23 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - 
[[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "valuable" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.10.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" +checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" [[package]] name = "vcpkg" @@ -3903,15 +3762,15 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "waker-fn" -version = "1.2.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "want" @@ -3924,27 +3783,29 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.7" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" +checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" dependencies = [ "bytes", "futures-channel", "futures-util", "headers", - "http 0.2.12", - "hyper 0.14.32", + "http 0.2.11", + "hyper 0.14.28", "log", "mime", "mime_guess", "multer", "percent-encoding", "pin-project", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", "serde_urlencoded", "tokio", + "tokio-stream", "tokio-tungstenite", "tokio-util", "tower-service", @@ -3963,59 +3824,48 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" -[[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" -dependencies = [ - "wit-bindgen-rt", -] - [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if 1.0.0", - "once_cell", - "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", + "once_cell", "proc-macro2", "quote", - "syn 
2.0.99", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" dependencies = [ "cfg-if 1.0.0", "js-sys", - "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4023,31 +3873,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] +checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" dependencies = [ "js-sys", "wasm-bindgen", @@ -4099,15 +3946,9 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.0", ] -[[package]] -name = "windows-link" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" - [[package]] name = "windows-sys" version = "0.48.0" @@ -4123,16 +3964,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.0", ] [[package]] @@ -4152,18 +3984,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - 
"windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] @@ -4174,9 +4005,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" @@ -4186,9 +4017,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" @@ -4198,15 +4029,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" @@ -4216,9 +4041,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" @@ -4228,9 +4053,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" @@ -4240,9 +4065,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" @@ -4252,9 +4077,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = 
"dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winreg" @@ -4266,27 +4091,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wit-bindgen-rt" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.0", -] - -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -4297,116 +4101,28 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "yoke" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", - "synstructure", -] - [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" -dependencies = [ - "zerocopy-derive 0.8.21", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] - -[[package]] -name = "zerofrom" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.6" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", - "synstructure", + "syn 2.0.58", ] [[package]] name = "zeroize" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" - -[[package]] -name = "zerovec" -version = "0.10.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.99", -] +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" From 2c0e174b00bf8214472d1d6bc7c752e2ccc69ef4 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 08:17:41 -0800 Subject: [PATCH 049/238] use dockerfile in repo root --- .github/workflows/image-build-source.yml | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index e45455f05b6..ae42965a620 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -21,13 +21,6 @@ jobs: image: name: Build Image runs-on: ubuntu-latest - strategy: - fail-fast: false - ## Build a maximum of 2 images concurrently based on matrix.dist - max-parallel: 2 - matrix: - dist: - - debian steps: ## Setup Docker for the builds - name: Docker setup @@ -63,7 +56,7 @@ jobs: id: docker_build uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 with: - file: ./.github/actions/dockerfiles/Dockerfile.${{matrix.dist}}-source + file: ./Dockerfile platforms: ${{ env.docker_platforms }} tags: ${{ steps.docker_metadata.outputs.tags }} labels: ${{ steps.docker_metadata.outputs.labels }} From 14fde1a39282d0cf18eb8ebf5307a0e379899752 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 08:35:10 -0800 Subject: [PATCH 050/238] use rust 1.84 in dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 83270f3997a..9d536f3d1ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:bookworm AS build +FROM rust:1.84-bookworm AS build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' From 1f3462f91c0e1087555df64b8ad90aa39865327c Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 08:45:11 -0800 Subject: [PATCH 051/238] install rust toolchain --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9d536f3d1ea..a65dc455447 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.84-bookworm AS build +FROM rust:bookworm AS build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -10,6 +10,7 @@ COPY . . RUN mkdir /out +RUN rustup toolchain install RUN cargo build --features monitoring_prom,slog_json --release RUN cp target/release/stacks-node /out From 1f707ed7f021163111d88dccf4d4ae8e47bb2ee9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 08:47:56 -0800 Subject: [PATCH 052/238] rust toolchain requires an arg --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a65dc455447..1b8ef934c97 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ COPY . . 
 RUN mkdir /out
 
-RUN rustup toolchain install
+RUN rustup toolchain install stable
 RUN cargo build --features monitoring_prom,slog_json --release
 RUN cp target/release/stacks-node /out

From bd095557d1c5dd7e3c76cb6b3de5550ff4b56ea4 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Tue, 4 Mar 2025 13:39:43 -0500
Subject: [PATCH 053/238] fix: allow a tenure extend during the expected second

Allow a tenure extend to happen when the current timestamp is equal to
the calculated extend time, not only when it is greater.
---
 stacks-signer/src/chainstate.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
index f6184192122..8654450738a 100644
--- a/stacks-signer/src/chainstate.rs
+++ b/stacks-signer/src/chainstate.rs
@@ -402,7 +402,7 @@ impl SortitionsView {
             false,
         );
         let epoch_time = get_epoch_time_secs();
-        let enough_time_passed = epoch_time > extend_timestamp;
+        let enough_time_passed = epoch_time >= extend_timestamp;
         if !changed_burn_view && !enough_time_passed {
             warn!(
                 "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid.";

From 772a09ab2e5e0ab24adc96e061623dd1a80f206e Mon Sep 17 00:00:00 2001
From: wileyj <2847772+wileyj@users.noreply.github.com>
Date: Tue, 4 Mar 2025 11:01:22 -0800
Subject: [PATCH 054/238] Adding workflow to create release PRs

ref stacks-network/actions/pulls/66
---
 .github/workflows/github-release.yml | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index 5028d35968a..5da93df0a6d 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -142,3 +142,21 @@ jobs:
       DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
       DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
       dist: ${{ matrix.dist }}
+
+  ## Create the downstream PR for the release branch to master,develop
+  create-pr:
+    if: |
+      inputs.node_tag != '' ||
+      inputs.signer_tag != ''
+    name: Create Downstream PR (${{ github.ref_name }})
+    runs-on: ubuntu-latest
+    needs:
+      - build-binaries
+      - create-release
+      - docker-image
+    steps:
+      - name: Open Downstream PR
+        id: create-pr
+        uses: stacks-network/actions/stacks-core/release/downstream-pr@main
+        with:
+          token: ${{ secrets.GH_TOKEN }}

From 82b9b6811ba6ec765c4df1fd40012f7dda29c561 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Tue, 4 Mar 2025 13:55:20 -0500
Subject: [PATCH 055/238] test: reduce timeouts and flakiness in `idle_tenure_extend_active_mining`

---
 testnet/stacks-node/src/tests/signer/v0.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index f85be74d095..600f33bf5d5 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -3606,7 +3606,7 @@ fn idle_tenure_extend_active_mining() {
     let amount =
         deploy_fee + tx_fee * num_txs * tenure_count * num_naka_blocks * 100 + 100 * tenure_count;
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-    let idle_timeout = Duration::from_secs(60);
+    let idle_timeout = Duration::from_secs(30);
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
         num_signers,
         vec![(sender_addr, amount), (deployer_addr, amount)],
@@ -3793,7 +3793,7 @@ fn idle_tenure_extend_active_mining() {
     );
 
     // Now, wait for 
the idle timeout to trigger - wait_for(extend_diff + 30, || { + wait_for(idle_timeout.as_secs() * 2, || { Ok(last_block_contains_tenure_change_tx( TenureChangeCause::Extended, )) From c6b0866de56fc384ca4b8f5fcef3a29bff7d6944 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Feb 2025 17:54:45 -0500 Subject: [PATCH 056/238] feat: allow signers to reconsider some rejected blocks Allows a signer to reconsider a block that it previously rejected if it was rejected for certain reasons that may resolve themselves, for example a testing directive, the parent block was unknown, or there was a communication failure with the stacks-node. Resolves #5856 --- libsigner/src/v0/messages.rs | 10 +- stacks-signer/src/signerdb.rs | 3 + stacks-signer/src/v0/signer.rs | 85 +++++++++--- stacks-signer/src/v0/tests.rs | 3 + .../src/tests/nakamoto_integrations.rs | 2 + testnet/stacks-node/src/tests/signer/v0.rs | 130 ++++++++++++++++++ 6 files changed, 207 insertions(+), 26 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index ab3c45c6d21..e9234cf5e4c 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -51,7 +51,7 @@ use clarity::util::hash::Sha256Sum; use clarity::util::retry::BoundReader; use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::{QualifiedContractIdentifier, TupleData}; +use clarity::vm::types::{QualifiedContractIdentifier, ResponseData, TupleData}; use clarity::vm::Value; use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; @@ -875,11 +875,11 @@ impl BlockResponse { } } - /// The signer signature hash for the block response - pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { + /// Get the block response data from the block response + pub fn get_response_data(&self) -> &BlockResponseData { match self { - BlockResponse::Accepted(accepted) => accepted.signer_signature_hash, - BlockResponse::Rejected(rejection) => rejection.signer_signature_hash, + BlockResponse::Accepted(accepted) => &accepted.response_data, + BlockResponse::Rejected(rejection) => &rejection.response_data, } } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index aa9b024643b..0ddb0b10620 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -167,6 +167,8 @@ pub struct BlockInfo { pub validation_time_ms: Option, /// Extra data specific to v0, v1, etc. 
pub ext: ExtraBlockInfo, + /// If this signer rejected this block, what was the reason + pub reject_reason: Option, } impl From for BlockInfo { @@ -184,6 +186,7 @@ impl From for BlockInfo { ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, validation_time_ms: None, + reject_reason: None, } } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index de4718f5c78..bb2a6e678ca 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -21,7 +21,8 @@ use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ - BlockValidateOk, BlockValidateReject, BlockValidateResponse, TOO_MANY_REQUESTS_STATUS, + BlockValidateOk, BlockValidateReject, BlockValidateResponse, ValidateRejectCode, + TOO_MANY_REQUESTS_STATUS, }; use blockstack_lib::util_lib::db::Error as DBError; use clarity::types::chainstate::StacksPrivateKey; @@ -391,6 +392,7 @@ impl Signer { ), ) } + /// Create a block rejection response for a block with the given reject code pub fn create_block_rejection( &self, @@ -411,6 +413,7 @@ impl Signer { ), ) } + /// Check if block should be rejected based on sortition state /// Will return a BlockResponse::Rejection if the block is invalid, none otherwise. fn check_block_against_sortition_state( @@ -561,32 +564,37 @@ impl Signer { // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) { - let Some(block_response) = self.determine_response(&block_info) else { - // We are still waiting for a response for this block. Do nothing. - debug!("{self}: Received a block proposal for a block we are already validating."; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id() - ); - return; - }; - // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - let accepted = matches!(block_response, BlockResponse::Accepted(..)); - match self - .stackerdb - .send_message_with_retry::(block_response.into()) - { - Ok(_) => { + if should_reevaluate_block(&block_info) { + // Treat this case the same as if no block info was found + } else { + let Some(block_response) = self.determine_response(&block_info) else { + // We are still waiting for a response for this block. Do nothing. 
+                    debug!(
+                        "{self}: Received a block proposal for a block we are already validating.";
+                        "signer_sighash" => %signer_signature_hash,
+                        "block_id" => %block_proposal.block.block_id()
+                    );
+                    return;
+                };
+
+                // Submit a proposal response to the .signers contract for miners
+                debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}");
+
+                let accepted = matches!(block_response, BlockResponse::Accepted(..));
+                if let Err(e) = self
+                    .stackerdb
+                    .send_message_with_retry::<SignerMessage>(block_response.into())
+                {
+                    warn!("{self}: Failed to send block response to stacker-db: {e:?}");
+                } else {
                     crate::monitoring::actions::increment_block_responses_sent(accepted);
                     crate::monitoring::actions::record_block_response_latency(
                         &block_proposal.block,
                     );
                 }
-                Err(e) => {
-                    warn!("{self}: Failed to send block response to stacker-db: {e:?}",);
-                }
+
+                return;
             }
-            return;
         }
 
         info!(
@@ -890,6 +898,8 @@ impl Signer {
                 false,
             ),
         );
+
+        block_info.reject_reason = Some(block_rejection.response_data.reject_reason.clone());
         self.signer_db
             .insert_block(&block_info)
             .unwrap_or_else(|e| self.handle_insert_block_error(e));
@@ -1019,6 +1029,7 @@ impl Signer {
             ),
             &block_info.block,
         );
+        block_info.reject_reason = Some(rejection.get_response_data().reject_reason.clone());
         if let Err(e) = block_info.mark_locally_rejected() {
             if !block_info.has_reached_consensus() {
                 warn!("{self}: Failed to mark block as locally rejected: {e:?}");
@@ -1039,6 +1050,7 @@ impl Signer {
             ),
             Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"),
         }
+
         self.signer_db
             .insert_block(&block_info)
             .unwrap_or_else(|e| self.handle_insert_block_error(e));
@@ -1132,6 +1144,7 @@ impl Signer {
         ) {
             warn!("{self}: Failed to save block rejection signature: {e:?}",);
         }
+        block_info.reject_reason = Some(rejection.response_data.reject_reason.clone());
 
         // do we have enough signatures to mark a block a globally rejected?
         // i.e. is (set-size) - (threshold) + 1 reached.
@@ -1412,3 +1425,33 @@ impl Signer {
         }
     }
 }
+
+/// Determine if a block should be re-evaluated based on its rejection reason
+fn should_reevaluate_block(block_info: &BlockInfo) -> bool {
+    if let Some(reject_reason) = &block_info.reject_reason {
+        match reject_reason {
+            RejectReason::ValidationFailed(ValidateRejectCode::UnknownParent)
+            | RejectReason::NoSortitionView
+            | RejectReason::ConnectivityIssues(_)
+            | RejectReason::TestingDirective
+            | RejectReason::NotRejected
+            | RejectReason::Unknown(_) => true,
+            RejectReason::ValidationFailed(_)
+            | RejectReason::RejectedInPriorRound
+            | RejectReason::SortitionViewMismatch
+            | RejectReason::ReorgNotAllowed
+            | RejectReason::InvalidBitvec
+            | RejectReason::PubkeyHashMismatch
+            | RejectReason::InvalidMiner
+            | RejectReason::NotLatestSortitionWinner
+            | RejectReason::InvalidParentBlock
+            | RejectReason::DuplicateBlockFound
+            | RejectReason::InvalidTenureExtend => {
+                // No need to re-validate these types of rejections. 
+                false
+            }
+        }
+    } else {
+        false
+    }
+}
diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs
index 06ccf7ee22a..9ea494f808b 100644
--- a/stacks-signer/src/v0/tests.rs
+++ b/stacks-signer/src/v0/tests.rs
@@ -92,6 +92,9 @@ impl Signer {
                 warn!("{self}: Failed to mark block as locally rejected: {e:?}");
             }
         };
+
+        block_info.reject_reason = Some(RejectReason::TestingDirective);
+
         // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject
         // as invalid since we rejected in a prior round if this crops up again)
         // in case this is the first time we saw this block. Safe to do since this is testing case only.
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index ebd2bc5c4e5..2aa232935cc 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -6642,6 +6642,7 @@ fn signer_chainstate() {
             ext: ExtraBlockInfo::None,
             state: BlockState::Unprocessed,
             validation_time_ms: None,
+            reject_reason: None,
         })
         .unwrap();
 
@@ -6722,6 +6723,7 @@ fn signer_chainstate() {
             ext: ExtraBlockInfo::None,
             state: BlockState::GloballyAccepted,
             validation_time_ms: Some(1000),
+            reject_reason: None,
         })
         .unwrap();
 
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 600f33bf5d5..fb0e354f12a 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -12305,3 +12305,133 @@ fn retry_proposal() {
 
     signer_test.shutdown();
 }
+
+#[test]
+#[ignore]
+/// This test verifies that a signer will accept a rejected block if it is
+/// re-proposed and determined to be legitimate. This can happen if the block
+/// is initially rejected due to a test flag or because the stacks-node had
+/// not yet processed the block's parent. 
+fn signer_can_accept_rejected_block() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * 3)], + |_| {}, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(123)); + config + .miner + .block_rejection_timeout_steps + .insert(10, Duration::from_secs(20)); + config + .miner + .block_rejection_timeout_steps + .insert(15, Duration::from_secs(10)); + config + .miner + .block_rejection_timeout_steps + .insert(20, Duration::from_secs(30)); + }, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + let proposed_blocks = signer_test + .running_nodes + .counters + .naka_proposed_blocks + .clone(); + + signer_test.mine_nakamoto_block(Duration::from_secs(60), true); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let block_height_before = info.stacks_tip_height; + + // make signer[0] reject all proposals + let rejecting_signer = + StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[0]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![rejecting_signer]); + + // make signer[1] ignore all proposals + let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]); + + let proposals_before = proposed_blocks.load(Ordering::SeqCst); + + // submit a tx so that the miner will mine a block + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(60, || { + if proposed_blocks.load(Ordering::SeqCst) > proposals_before { + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for block proposal"); + + info!( + "Block proposed, submitting another transaction that should not get included in the block" + ); + let transfer_tx = make_stacks_transfer( + &sender_sk, + 1, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Disable signer 0 from rejecting proposals"); + test_observer::clear(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); + + info!("Waiting for the block to be approved"); + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().expect("No blocks found"); + let height = last_block["block_height"].as_u64().unwrap(); + if height > block_height_before { + return Ok(true); + } + Ok(false) + }) + .expect("Timed out waiting for block"); + + // Ensure that the block was the original block with just 1 transfer + let blocks = test_observer::get_blocks(); + let block = blocks.last().expect("No blocks found"); + assert_eq!(transfers_in_block(block), 1); + + signer_test.shutdown(); 
+} From 34d3282801a7cb415c488fb6886750a35c8fa7b6 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Mar 2025 14:30:03 -0500 Subject: [PATCH 057/238] refactor: avoid duplication in `send_block_response` --- stacks-signer/src/v0/signer.rs | 40 +++++++++++++++------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index de4718f5c78..8b9c347e508 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -478,6 +478,22 @@ impl Signer { } } + /// The actual `send_block_response` implementation. Declared so that we do + /// not need to duplicate in testing. + fn impl_send_block_response(&mut self, block_response: BlockResponse) { + let res = self + .stackerdb + .send_message_with_retry::(block_response.clone().into()); + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + } + #[cfg(any(test, feature = "testing"))] fn send_block_response(&mut self, block_response: BlockResponse) { const NUM_REPEATS: usize = 1; @@ -489,17 +505,7 @@ impl Signer { count = NUM_REPEATS; } while count <= NUM_REPEATS { - let res = self - .stackerdb - .send_message_with_retry::(block_response.clone().into()); - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + self.impl_send_block_response(block_response.clone()); count += 1; sleep_ms(1000); @@ -508,17 +514,7 @@ impl Signer { #[cfg(not(any(test, feature = "testing")))] fn send_block_response(&mut self, block_response: BlockResponse) { - let res = self - .stackerdb - .send_message_with_retry::(block_response.clone().into()); - match res { - Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), - Ok(ack) if !ack.accepted => warn!( - "{self}: Block rejection not accepted by stacker-db: {:?}", - ack.reason - ), - Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), - } + self.impl_send_block_response(block_response) } /// Handle block proposal messages submitted to signers stackerdb From 346e823d6f9688e85c2e4a2faa33661265b13f5a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Mar 2025 14:32:32 -0500 Subject: [PATCH 058/238] chore: add changelog entry --- CHANGELOG.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea39bd441ef..9545caf061f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,13 +7,17 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Changed + +- When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) + ## [3.1.0.0.7] -## Added +### Added - Add `disable_retries` mode for events_observer disabling automatic retry on error -## Changed +### Changed - Implement faster cost tracker for default cost functions in Clarity - By default, miners will wait for a new tenure to start for a configurable amount of time after receiving a burn block before @@ -23,7 +27,7 @@ and this project adheres to the versioning 
scheme outlined in the [README.md](RE ## [3.1.0.0.6] -## Added +### Added - The `BlockProposal` StackerDB message serialization struct now includes a `server_version` string, which represents the version of the node that the miner is using. ([#5803](https://github.com/stacks-network/stacks-core/pull/5803)) - Add `vrf_seed` to the `/v3/sortitions` rpc endpoint From 962aecfaf754eb1be875cce4073929d6974b8d3f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Mar 2025 14:38:14 -0500 Subject: [PATCH 059/238] chore: add changelog entry --- stacks-signer/CHANGELOG.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 59fe85aee6e..35ae5d07c40 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -7,9 +7,13 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Changed + +- For some rejection reasons, a signer will reconsider a block proposal that it previously rejected ([#5880](https://github.com/stacks-network/stacks-core/pull/5880)) + ## [3.1.0.0.7.0] -## Changed +### Changed - Add new reject codes to the signer response for better visibility into why a block was rejected. - When allowing a reorg within the `reorg_attempts_activity_timeout_ms`, the signer will now watch the responses from other signers and if >30% of them reject this reorg attempt, then the signer will mark the miner as invalid, reject further attempts to reorg and allow the previous miner to extend their tenure. @@ -20,7 +24,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [3.1.0.0.6.0] -## Added +### Added - Introduced the `reorg_attempts_activity_timeout_ms` configuration option for signers which is used to determine the length of time after the last block of a tenure is confirmed that an incoming miner's attempts to reorg it are considered valid miner activity. - Add signer configuration option `tenure_idle_timeout_buffer_secs` to specify the number of seconds of buffer the signer will add to its tenure extend time that it sends to miners. The idea is to allow for some clock skew between the miner and signers, preventing the case where the miner attempts to tenure extend too early. 
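The rule described by the changelog entry above ("for some rejection reasons, a signer will reconsider a block proposal that it previously rejected") can be illustrated on its own, apart from the signer machinery in PATCH 056: a proposal that was previously rejected for a reason that may resolve itself is validated again, while a deterministic rejection is simply replayed. The sketch below is a minimal, self-contained model, not the signer's actual code. `RejectReason` is collapsed here to a few representative variants, and `BlockInfo`, `is_transient`, and `on_reproposal` are illustrative stand-ins for the real `should_reevaluate_block` gate and its caller in `handle_block_proposal`.

// Illustrative only: one representative variant per class; the real enum
// in PATCH 056 has many more variants on each side of the split.
#[derive(Debug)]
enum RejectReason {
    ConnectivityIssues,    // transient: the stacks-node may recover
    UnknownParent,         // transient: the parent block may arrive later
    TestingDirective,      // transient: only set by test flags
    SortitionViewMismatch, // deterministic: re-validation cannot change it
}

// Hypothetical container mirroring the `reject_reason` field the patches
// add to the signer's `BlockInfo`.
struct BlockInfo {
    reject_reason: Option<RejectReason>,
}

fn is_transient(reason: &RejectReason) -> bool {
    matches!(
        reason,
        RejectReason::ConnectivityIssues
            | RejectReason::UnknownParent
            | RejectReason::TestingDirective
    )
}

// Decide how to handle a re-proposed block we already hold a record for:
// transient rejections fall through to a fresh validation round, anything
// else replays the stored response.
fn on_reproposal(stored: &BlockInfo) -> &'static str {
    match &stored.reject_reason {
        Some(reason) if is_transient(reason) => "re-validate as if unseen",
        Some(_) => "replay the stored rejection",
        None => "replay the stored response (or keep waiting)",
    }
}

fn main() {
    let transient = BlockInfo {
        reject_reason: Some(RejectReason::UnknownParent),
    };
    assert_eq!(on_reproposal(&transient), "re-validate as if unseen");

    let hard = BlockInfo {
        reject_reason: Some(RejectReason::SortitionViewMismatch),
    };
    assert_eq!(on_reproposal(&hard), "replay the stored rejection");
}

Grouping the variants this way keeps the fall-through case explicit: a transiently rejected proposal is treated the same as one the signer has never seen, which is exactly how the patched handler reaches validation again.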
From dfaf38510c4120e4642ed65c0b4e48771b0f21e5 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Mar 2025 15:39:35 -0500 Subject: [PATCH 060/238] test: fix flakiness in `signer_can_accept_rejected_block` --- testnet/stacks-node/src/tests/signer/v0.rs | 59 ++++++++++++---------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fb0e354f12a..edfdd5f7de6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12355,13 +12355,10 @@ fn signer_can_accept_rejected_block() { None, ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - signer_test.boot_to_epoch_3(); + let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); + let miner_pk = StacksPublicKey::from_private(&miner_sk); - let proposed_blocks = signer_test - .running_nodes - .counters - .naka_proposed_blocks - .clone(); + signer_test.boot_to_epoch_3(); signer_test.mine_nakamoto_block(Duration::from_secs(60), true); @@ -12377,7 +12374,8 @@ fn signer_can_accept_rejected_block() { let ignoring_signer = StacksPublicKey::from_private(&signer_test.signer_stacks_private_keys[1]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ignoring_signer]); - let proposals_before = proposed_blocks.load(Ordering::SeqCst); + // Stall block validation so we can ensure the timing we want to test + TEST_VALIDATE_STALL.set(true); // submit a tx so that the miner will mine a block let transfer_tx = make_stacks_transfer( @@ -12391,13 +12389,19 @@ fn signer_can_accept_rejected_block() { submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); - wait_for(60, || { - if proposed_blocks.load(Ordering::SeqCst) > proposals_before { - return Ok(true); - } - Ok(false) - }) - .expect("Timed out waiting for block proposal"); + let block = wait_for_block_proposal(30, block_height_before + 1, &miner_pk) + .expect("Timed out waiting for block proposal"); + + // Wait for signer[0] to reject the block + wait_for_block_rejections(30, block.header.signer_signature_hash(), 1) + .expect("Failed to get expected rejections for Miner 1's block"); + + info!("Disable signer 0 from rejecting proposals"); + test_observer::clear(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); + + // Unstall the other signers + TEST_VALIDATE_STALL.set(false); info!( "Block proposed, submitting another transaction that should not get included in the block" @@ -12412,26 +12416,25 @@ fn signer_can_accept_rejected_block() { ); submit_tx(&http_origin, &transfer_tx); - info!("Disable signer 0 from rejecting proposals"); - test_observer::clear(); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); - info!("Waiting for the block to be approved"); wait_for(60, || { let blocks = test_observer::get_blocks(); - let last_block = blocks.last().expect("No blocks found"); - let height = last_block["block_height"].as_u64().unwrap(); - if height > block_height_before { - return Ok(true); + + // Look for a block with height `block_height_before + 1` + if let Some(block) = blocks + .iter() + .find(|block| block["block_height"].as_u64() == Some(block_height_before + 1)) + { + if transfers_in_block(block) == 1 { + Ok(true) // Success: found the block with exactly 1 transfer + } else { + Err("Transfer included in block".into()) // Found the block, but it has the wrong number of transfers + } + } else { + Ok(false) // Keep waiting if the block hasn't appeared yet } - Ok(false) }) 
.expect("Timed out waiting for block"); - // Ensure that the block was the original block with just 1 transfer - let blocks = test_observer::get_blocks(); - let block = blocks.last().expect("No blocks found"); - assert_eq!(transfers_in_block(block), 1); - signer_test.shutdown(); } From ee05841da6fd1af40f31829ba51d8d26b584627d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 14:15:56 -0800 Subject: [PATCH 061/238] Updating Dockerfile to report version data --- .dockerignore | 11 ----------- .github/workflows/image-build-source.yml | 2 +- Dockerfile | 10 ++-------- 3 files changed, 3 insertions(+), 20 deletions(-) diff --git a/.dockerignore b/.dockerignore index aa66cbcb378..ac0df83528a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,14 +1,3 @@ Dockerfile* target -integration_tests/blockstack-consensus-data/ -integration_tests/test-out/ -api/data -.git -.venv .dockerignore -testnet/index.html -testnet.log -testnet-logs* -legacy -build-scripts -dist \ No newline at end of file diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index 31cd40b360e..3cac2c6cb83 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -25,7 +25,7 @@ jobs: ## Setup Docker for the builds - name: Docker setup id: docker_setup - uses: stacks-network/actions/docker@main + uses: wileyj/actions/docker@main with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} diff --git a/Dockerfile b/Dockerfile index 1b8ef934c97..ca03fa3ac60 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,18 +5,12 @@ ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' WORKDIR /src - COPY . . - RUN mkdir /out - RUN rustup toolchain install stable RUN cargo build --features monitoring_prom,slog_json --release - -RUN cp target/release/stacks-node /out +RUN cp -R target/release/. /out FROM debian:bookworm-slim - -COPY --from=build /out/ /bin/ - +COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ CMD ["stacks-node", "mainnet"] From fdc9bc63f5ce5cfe5b070a0ed0656e03e0c43cea Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 4 Mar 2025 17:22:48 -0500 Subject: [PATCH 062/238] feat: reevaluate `InvalidTenureExtend` rejection This allows a miner that proposes a block with a time-based tenure extend to re-propose it and get it approved if initially, some signers thought it was too early. --- stacks-signer/src/v0/signer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 502eea17fcc..4525cc966d0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -1430,6 +1430,7 @@ fn should_reevaluate_block(block_info: &BlockInfo) -> bool { | RejectReason::NoSortitionView | RejectReason::ConnectivityIssues(_) | RejectReason::TestingDirective + | RejectReason::InvalidTenureExtend | RejectReason::NotRejected | RejectReason::Unknown(_) => true, RejectReason::ValidationFailed(_) @@ -1441,8 +1442,7 @@ fn should_reevaluate_block(block_info: &BlockInfo) -> bool { | RejectReason::InvalidMiner | RejectReason::NotLatestSortitionWinner | RejectReason::InvalidParentBlock - | RejectReason::DuplicateBlockFound - | RejectReason::InvalidTenureExtend => { + | RejectReason::DuplicateBlockFound => { // No need to re-validate these types of rejections. 
false } From e02498c8addf3e068452741558e23b18b953542d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 14:42:56 -0800 Subject: [PATCH 063/238] use the stacks-network composite workflow --- .github/workflows/image-build-source.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index 3cac2c6cb83..31cd40b360e 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -25,7 +25,7 @@ jobs: ## Setup Docker for the builds - name: Docker setup id: docker_setup - uses: wileyj/actions/docker@main + uses: stacks-network/actions/docker@main with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} From 4258dd4fc1f45ac5a07fb956faac882715d8dabc Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 4 Mar 2025 14:58:03 -0800 Subject: [PATCH 064/238] Removing unused Dockerfile Dockerfile.debian-source --- .../dockerfiles/Dockerfile.debian-source | 28 ------------------- 1 file changed, 28 deletions(-) delete mode 100644 .github/actions/dockerfiles/Dockerfile.debian-source diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source deleted file mode 100644 index 80c434e8d5f..00000000000 --- a/.github/actions/dockerfiles/Dockerfile.debian-source +++ /dev/null @@ -1,28 +0,0 @@ -FROM rust:bookworm as build - -ARG STACKS_NODE_VERSION="No Version Info" -ARG GIT_BRANCH='No Branch Info' -ARG GIT_COMMIT='No Commit Info' -ARG BUILD_DIR=/build -ARG TARGET=x86_64-unknown-linux-gnu -# Allow us to override the default `--target-cpu` for the given target triplet -ARG TARGET_CPU -ENV RUSTFLAGS="${TARGET_CPU:+${RUSTFLAGS} -Ctarget-cpu=${TARGET_CPU}}" -WORKDIR /src - -COPY . . - -RUN apt-get update && apt-get install -y git libclang-dev - -# Run all the build steps in ramdisk in an attempt to speed things up -RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ - && cd ${BUILD_DIR} \ - && rustup target add ${TARGET} \ - && rustup component add rustfmt \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ - && mkdir -p /out \ - && cp -R ${BUILD_DIR}/target/${TARGET}/release/. 
/out - -FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ -CMD ["stacks-node", "mainnet"] From f56bce39450a648ed4bf88c8271b261a567b94ff Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 5 Mar 2025 15:15:42 +0100 Subject: [PATCH 065/238] added max_execution_time to nakamoto miner --- clarity/src/vm/clarity.rs | 2 + clarity/src/vm/mod.rs | 3 +- stackslib/src/chainstate/coordinator/tests.rs | 9 ++- stackslib/src/chainstate/nakamoto/miner.rs | 20 +++-- stackslib/src/chainstate/nakamoto/shadow.rs | 1 + .../src/chainstate/nakamoto/tests/node.rs | 1 + .../chainstate/stacks/boot/contract_tests.rs | 1 + stackslib/src/chainstate/stacks/db/blocks.rs | 7 +- stackslib/src/chainstate/stacks/db/mod.rs | 2 + .../src/chainstate/stacks/db/transactions.rs | 66 +++++++++++++++- stackslib/src/chainstate/stacks/miner.rs | 39 ++++++++-- .../stacks/tests/chain_histories.rs | 77 ++++++++++++++++--- stackslib/src/clarity_vm/clarity.rs | 36 +++++++-- stackslib/src/clarity_vm/tests/contracts.rs | 17 +++- .../src/clarity_vm/tests/large_contract.rs | 32 +++++--- stackslib/src/config/mod.rs | 10 ++- stackslib/src/net/api/postblock_proposal.rs | 1 + .../src/net/api/tests/postblock_proposal.rs | 1 + stackslib/src/net/mod.rs | 1 + .../src/tests/nakamoto_integrations.rs | 1 + 20 files changed, 273 insertions(+), 54 deletions(-) diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 1e503d14254..e3208287462 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -292,6 +292,7 @@ pub trait TransactionConnection: ClarityConnection { public_function: &str, args: &[Value], abort_call_back: F, + max_execution_time: Option, ) -> Result<(Value, AssetMap, Vec), Error> where F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, @@ -303,6 +304,7 @@ pub trait TransactionConnection: ClarityConnection { self.with_abort_callback( |vm_env| { + vm_env.context.set_max_execution_time(max_execution_time); vm_env .execute_transaction( sender.clone(), diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 3872906f590..7bb0a81fe00 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -54,7 +54,6 @@ pub mod test_util; pub mod clarity; use std::collections::BTreeMap; -use std::time::Duration; use serde_json; use stacks_common::types::StacksEpochId; @@ -607,7 +606,7 @@ pub fn execute(program: &str) -> Result> { #[cfg(any(test, feature = "testing"))] pub fn execute_with_max_execution_time( program: &str, - max_execution_time: Duration, + max_execution_time: std::time::Duration, ) -> Result> { execute_with_parameters_and_call_in_global_context( program, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c7bb456f44d..79aa04f0440 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -392,6 +392,7 @@ pub fn setup_states_with_epochs( Value::UInt(burnchain.pox_constants.pox_rejection_fraction as u128), ], |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX contract"); }); @@ -666,7 +667,7 @@ fn make_genesis_block_with_recipients( .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules) + .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules, None) .unwrap(); let block = builder.mine_anchored_block(&mut epoch_tx); @@ -931,11 +932,13 @@ fn make_stacks_block_with_input( .0; builder - .try_mine_tx(&mut epoch_tx, &coinbase_op, ast_rules) + .try_mine_tx(&mut epoch_tx, 
&coinbase_op, ast_rules, None) .unwrap(); for tx in txs { - builder.try_mine_tx(&mut epoch_tx, tx, ast_rules).unwrap(); + builder + .try_mine_tx(&mut epoch_tx, tx, ast_rules, None) + .unwrap(); } let block = builder.mine_anchored_block(&mut epoch_tx); diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index d9ad1319f71..93f5a871211 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -691,6 +691,7 @@ impl BlockBuilder for NakamotoBlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); @@ -737,13 +738,18 @@ impl BlockBuilder for NakamotoBlockBuilder { } let cost_before = clarity_tx.cost_so_far(); - let (fee, receipt) = - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules) { - Ok(x) => x, - Err(e) => { - return parse_process_transaction_error(clarity_tx, tx, e); - } - }; + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ast_rules, + max_execution_time, + ) { + Ok(x) => x, + Err(e) => { + return parse_process_transaction_error(clarity_tx, tx, e); + } + }; let cost_after = clarity_tx.cost_so_far(); let mut soft_limit_reached = false; // We only attempt to apply the soft limit to non-boot code contract calls. diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 6b00e9ac400..7ea84b7902a 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -539,6 +539,7 @@ impl NakamotoBlockBuilder { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ) { TransactionResult::Success(..) => { debug!("Included {}", &tx.txid()); diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9abe9b9a4e8..d9bfafcb1ed 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1037,6 +1037,7 @@ impl TestStacksNode { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ) { TransactionResult::Success(..) 
=> { debug!("Included {}", &tx.txid()); diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 2d88cfe2344..b39635dd92f 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -1178,6 +1178,7 @@ fn pox_2_delegate_extend_units() { Value::UInt(0), ], |_, _| false, + None, ) }) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index af1129b9f04..2e6b74596df 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4010,7 +4010,7 @@ impl StacksChainState { debug!("Process microblock {}", µblock.block_hash()); for (tx_index, tx) in microblock.txs.iter().enumerate() { let (tx_fee, mut tx_receipt) = - StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules) + StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None) .map_err(|e| (e, microblock.block_hash()))?; tx_receipt.microblock_header = Some(microblock.header.clone()); @@ -4176,6 +4176,7 @@ impl StacksChainState { "stack-stx", &args, |_, _| false, + None, ) }); match result { @@ -4384,6 +4385,7 @@ impl StacksChainState { reward_addr_val, ], |_, _| false, + None, ) }); match result { @@ -4490,6 +4492,7 @@ impl StacksChainState { Value::UInt(reward_cycle.clone().into()), ], |_, _| false, + None, ) }); match result { @@ -4567,7 +4570,7 @@ impl StacksChainState { let mut receipts = vec![]; for tx in block_txs.iter() { let (tx_fee, mut tx_receipt) = - StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules)?; + StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules, None)?; fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); tx_receipt.tx_index = tx_index; burns = burns diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 78811ff0b80..bab5d57a722 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1323,6 +1323,7 @@ impl StacksChainState { &boot_code_smart_contract, &boot_code_account, ASTRules::PrecheckSize, + None, ) })?; receipts.push(tx_receipt); @@ -1646,6 +1647,7 @@ impl StacksChainState { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX contract"); }); diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index f92bde7d981..22e3797e454 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -974,6 +974,7 @@ impl StacksChainState { tx: &StacksTransaction, origin_account: &StacksAccount, ast_rules: ASTRules, + max_execution_time: Option, ) -> Result { match tx.payload { TransactionPayload::TokenTransfer(ref addr, ref amount, ref memo) => { @@ -1044,6 +1045,7 @@ impl StacksChainState { ) .expect("FATAL: error while evaluating post-conditions") }, + max_execution_time, ); let mut total_cost = clarity_tx.cost_so_far(); @@ -1471,6 +1473,7 @@ impl StacksChainState { tx: &StacksTransaction, quiet: bool, ast_rules: ASTRules, + max_execution_time: Option, ) -> Result<(u64, StacksTransactionReceipt), Error> { debug!("Process transaction {} ({})", tx.txid(), tx.payload.name()); let epoch = clarity_block.get_epoch(); @@ -1509,6 +1512,7 @@ impl StacksChainState { tx, &origin_account, ast_rules, + max_execution_time, 
)?; // update the account nonces @@ -1537,6 +1541,7 @@ impl StacksChainState { tx, &origin_account, ast_rules, + None, )?; let new_payer_account = StacksChainState::get_payer_account(&mut transaction, tx); @@ -1700,6 +1705,7 @@ pub mod test { stx_balance: STXBalance::Unlocked { amount: 100 }, }, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -1764,6 +1770,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -1815,6 +1822,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2009,6 +2017,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ); if let Err(Error::InvalidStacksTransaction(msg, false)) = res { assert!(msg.contains(&err_frag), "{err_frag}"); @@ -2099,6 +2108,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2179,6 +2189,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2273,6 +2284,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ); if expected_behavior[i] { assert!(res.is_ok()); @@ -2366,6 +2378,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2470,6 +2483,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2558,6 +2572,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2671,6 +2686,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2683,6 +2699,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2805,6 +2822,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2820,6 +2838,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2895,6 +2914,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2946,6 +2966,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3010,6 +3031,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3115,6 +3137,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3153,6 +3176,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ); assert!(res.is_err()); @@ -3184,6 +3208,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3224,6 +3249,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ); assert!(res.is_ok()); @@ -3349,6 +3375,7 @@ pub mod test { &signed_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3365,6 +3392,7 @@ pub mod test { &signed_tx_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3876,6 +3904,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -3901,6 +3930,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance += 100; @@ -3931,6 +3961,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance -= 100; @@ -3978,6 +4009,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4008,6 +4040,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4051,6 +4084,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_recv_nonce += 1; @@ -4099,6 +4133,7 @@ pub mod test { tx_fail, false, 
ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4594,6 +4629,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -4618,6 +4654,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance += 100; @@ -4665,6 +4702,7 @@ pub mod test { tx_pass, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_stackaroos_balance -= 100; @@ -4731,6 +4769,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_nonce += 1; @@ -4789,6 +4828,7 @@ pub mod test { tx_fail, false, ASTRules::PrecheckSize, + None, ) .unwrap(); expected_recv_nonce += 1; @@ -4955,6 +4995,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -4963,6 +5004,7 @@ pub mod test { &contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8059,6 +8101,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); let err = StacksChainState::process_transaction( @@ -8066,6 +8109,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); @@ -8090,6 +8134,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); let (fee, _) = StacksChainState::process_transaction( @@ -8097,6 +8142,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8244,6 +8290,7 @@ pub mod test { &signed_tx_poison_microblock, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8364,6 +8411,7 @@ pub mod test { &signed_tx_poison_microblock, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); let Error::ClarityError(clarity_error::BadTransaction(msg)) = &err else { @@ -8482,6 +8530,7 @@ pub mod test { &signed_tx_poison_microblock_1, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8496,6 +8545,7 @@ pub mod test { &signed_tx_poison_microblock_2, false, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -8761,6 +8811,7 @@ pub mod test { &smart_contract_v2, false, ASTRules::PrecheckSize, + None, ) { assert!(msg.find("not in Stacks epoch 2.1 or later").is_some()); } else { @@ -9053,6 +9104,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9062,6 +9114,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9081,6 +9134,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9090,6 +9144,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9109,6 +9164,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9118,6 +9174,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); conn.commit_block(); @@ -9220,6 +9277,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9229,6 +9287,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9248,6 +9307,7 @@ pub mod test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9257,6 +9317,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); @@ -9276,6 +9337,7 @@ pub mod 
test { &signed_contract_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 0); @@ -9285,6 +9347,7 @@ pub mod test { &signed_contract_call_tx, false, ASTRules::PrecheckSize, + None, ) .unwrap_err(); conn.commit_block(); @@ -9310,7 +9373,7 @@ pub mod test { return Err(Error::InvalidStacksTransaction(msg, false)); } - StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules) + StacksChainState::process_transaction(clarity_block, tx, quiet, ast_rules, None) } #[test] @@ -9875,6 +9938,7 @@ pub mod test { &signed_runtime_checkerror_tx_clar1, false, ASTRules::PrecheckSize, + None, ) .unwrap(); assert_eq!(fee, 1); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 819de80a333..17fc44cee44 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -219,6 +219,7 @@ pub struct BlockBuilderSettings { pub miner_status: Arc>, /// Should the builder attempt to confirm any parent microblocks pub confirm_microblocks: bool, + pub max_execution_time: Option, } impl BlockBuilderSettings { @@ -230,6 +231,7 @@ impl BlockBuilderSettings { mempool_settings: MemPoolWalkSettings::default(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, + max_execution_time: None, } } @@ -241,6 +243,7 @@ impl BlockBuilderSettings { mempool_settings: MemPoolWalkSettings::zero(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), confirm_microblocks: true, + max_execution_time: None, } } } @@ -679,6 +682,7 @@ pub trait BlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + max_execution_time: Option, ) -> TransactionResult; /// Append a transaction if doing so won't exceed the epoch data size. @@ -688,6 +692,7 @@ pub trait BlockBuilder { clarity_tx: &mut ClarityTx, tx: &StacksTransaction, ast_rules: ASTRules, + max_execution_time: Option, ) -> Result { let tx_len = tx.tx_len(); match self.try_mine_tx_with_len( @@ -696,6 +701,7 @@ pub trait BlockBuilder { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ast_rules, + max_execution_time, ) { TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), TransactionResult::Skipped(TransactionSkipped { error, .. 
}) @@ -1053,7 +1059,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } let quiet = !cfg!(test); - match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules) { + match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules, None) { Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, fee, receipt)), Err(e) => { let (is_problematic, e) = @@ -1686,7 +1692,13 @@ impl StacksBlockBuilder { let quiet = !cfg!(test); if !self.anchored_done { // save - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ASTRules::Typical, + None, + ) { Ok((fee, receipt)) => { self.total_anchored_fees += fee; } @@ -1697,7 +1709,13 @@ impl StacksBlockBuilder { self.txs.push(tx.clone()); } else { - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + match StacksChainState::process_transaction( + clarity_tx, + tx, + quiet, + ASTRules::Typical, + None, + ) { Ok((fee, receipt)) => { self.total_streamed_fees += fee; } @@ -2097,7 +2115,7 @@ impl StacksBlockBuilder { let ast_rules = miner_epoch_info.ast_rules; let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; for tx in txs.into_iter() { - match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone()) { + match builder.try_mine_tx(&mut epoch_tx, &tx, ast_rules.clone(), None) { Ok(_) => { debug!("Included {}", &tx.txid()); } @@ -2267,7 +2285,12 @@ impl StacksBlockBuilder { for initial_tx in initial_txs.iter() { tx_events.push( builder - .try_mine_tx(epoch_tx, initial_tx, ast_rules.clone())? + .try_mine_tx( + epoch_tx, + initial_tx, + ast_rules.clone(), + settings.max_execution_time, + )? .convert_to_event(), ); } @@ -2407,6 +2430,7 @@ impl StacksBlockBuilder { txinfo.metadata.len, &block_limit_hit, ast_rules, + settings.max_execution_time, ); let result_event = tx_result.convert_to_event(); @@ -2732,6 +2756,7 @@ impl BlockBuilder for StacksBlockBuilder { tx_len: u64, limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, + _max_execution_time: Option, ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { return TransactionResult::skipped_due_to_error(tx, Error::BlockTooBigError); @@ -2797,7 +2822,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, + clarity_tx, tx, quiet, ast_rules, None, ) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { @@ -2887,7 +2912,7 @@ impl BlockBuilder for StacksBlockBuilder { return TransactionResult::problematic(tx, Error::NetError(e)); } let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, + clarity_tx, tx, quiet, ast_rules, None, ) { Ok((fee, receipt)) => (fee, receipt), Err(e) => { diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 0ad4549ecd3..e91b00bad1a 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -2734,7 +2734,12 @@ pub fn mine_empty_anchored_block( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = 
builder.mine_anchored_block(clarity_tx); @@ -2769,7 +2774,12 @@ pub fn mine_empty_anchored_block_with_burn_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2804,7 +2814,12 @@ pub fn mine_empty_anchored_block_with_stacks_height_pubkh( let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2835,7 +2850,12 @@ pub fn mine_invalid_token_transfers_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let recipient = @@ -2909,7 +2929,12 @@ pub fn mine_smart_contract_contract_call_block( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -2919,7 +2944,12 @@ pub fn mine_smart_contract_contract_call_block( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a contract call @@ -2931,7 +2961,12 @@ pub fn mine_smart_contract_contract_call_block( 2, ); builder - .try_mine_tx(clarity_tx, &tx_contract_call_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_call_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -2986,7 +3021,12 @@ pub fn mine_smart_contract_block_contract_call_microblock( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -2996,7 +3036,12 @@ pub fn mine_smart_contract_block_contract_call_microblock( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); @@ -3073,7 +3118,12 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( // make a coinbase for this miner let tx_coinbase_signed = make_coinbase(miner, burnchain_height); builder - .try_mine_tx(clarity_tx, &tx_coinbase_signed, ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_coinbase_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); // make a smart contract @@ -3083,7 +3133,12 @@ pub fn mine_smart_contract_block_contract_call_microblock_exception( builder.header.total_work.work as usize, ); builder - .try_mine_tx(clarity_tx, &tx_contract_signed, 
ASTRules::PrecheckSize) + .try_mine_tx( + clarity_tx, + &tx_contract_signed, + ASTRules::PrecheckSize, + None, + ) .unwrap(); let stacks_block = builder.mine_anchored_block(clarity_tx); diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 4d413200cc1..02832512bdb 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -878,6 +878,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &costs_2_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -991,6 +992,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_2_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 2 contract initialization"); @@ -1012,6 +1014,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-2 contract"); @@ -1062,6 +1065,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &costs_3_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process costs-3 contract initialization"); @@ -1232,6 +1236,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_3_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 3 contract initialization"); @@ -1253,6 +1258,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1349,6 +1355,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &pox_4_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process PoX 4 contract initialization"); @@ -1369,6 +1376,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { "set-burnchain-parameters", ¶ms, |_, _| false, + None, ) .expect("Failed to set burnchain parameters in PoX-3 contract"); @@ -1407,6 +1415,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers contract initialization"); receipt @@ -1453,6 +1462,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers DB contract initialization"); receipt @@ -1493,6 +1503,7 @@ impl<'a> ClarityBlockConnection<'a, '_> { &signers_contract_tx, &boot_code_account, ASTRules::PrecheckSize, + None, ) .expect("FATAL: Failed to process .signers-voting contract initialization"); receipt @@ -2218,7 +2229,8 @@ mod tests { &contract_identifier, "foo", &[Value::Int(1)], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2501,7 +2513,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2515,7 +2528,8 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(1), Value::Int(1)], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2531,6 +2545,7 @@ mod tests { "set-bar", &[Value::Int(10), Value::Int(1)], |_, _| true, + None, ) }) .unwrap_err(); @@ -2550,7 +2565,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0, @@ -2565,7 +2581,8 @@ mod tests { &contract_identifier, "set-bar", &[Value::Int(10), Value::Int(0)], - |_, _| true + |_, _| true, + None )) .unwrap_err() ) @@ -2579,7 +2596,8 @@ mod tests { &contract_identifier, "get-bar", &[], - |_, _| false + 
|_, _| false, + None )) .unwrap() .0, @@ -2690,6 +2708,7 @@ mod tests { &tx1, &account, ASTRules::PrecheckSize, + None, ) .unwrap(); assert!(receipt.post_condition_aborted); @@ -2700,6 +2719,7 @@ mod tests { &tx2, &account, ASTRules::PrecheckSize, + None, ) .unwrap(); }); @@ -2710,6 +2730,7 @@ mod tests { &tx3, &account, ASTRules::PrecheckSize, + None, ) .unwrap(); @@ -2893,7 +2914,8 @@ mod tests { &contract_identifier, "do-expand", &[], - |_, _| false + |_, _| false, + None )) .unwrap_err() { diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index 75f14fcc490..bf54fb14b0d 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -448,6 +448,7 @@ fn trait_invocation_cross_epoch() { "invocation-1", &[], |_, _| false, + None, ) .unwrap(); }); @@ -465,7 +466,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-1", &[], - |_, _| false, + |_, _| false, None ) .unwrap_err(); @@ -488,7 +489,7 @@ fn trait_invocation_cross_epoch() { &invoke_contract_id, "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], - |_, _| false, + |_, _| false, None ) .unwrap_err(); @@ -513,6 +514,7 @@ fn trait_invocation_cross_epoch() { "invocation-1", &[], |_, _| false, + None, ) .unwrap(); }); @@ -530,6 +532,7 @@ fn trait_invocation_cross_epoch() { "invocation-2", &[Value::Principal(impl_contract_id.clone().into())], |_, _| false, + None, ) .unwrap(); }); @@ -784,6 +787,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -804,6 +808,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -821,6 +826,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -841,6 +847,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -858,6 +865,7 @@ fn trait_with_trait_invocation_cross_epoch() { "do-it-static", &[], |_, _| false, + None, ) .unwrap(); }); @@ -878,6 +886,7 @@ fn trait_with_trait_invocation_cross_epoch() { Value::UInt(1), ], |_, _| false, + None, ) .unwrap(); }); @@ -1428,6 +1437,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res1.0); @@ -1440,6 +1450,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { "get-it", &[Value::Principal(contract_id_e3c3.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(20)).unwrap(), res2.0); @@ -1571,6 +1582,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { "get-it", &[Value::Principal(contract_id_e2c1.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res1.0); @@ -1583,6 +1595,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { "get-it", &[Value::Principal(contract_id_e2c2.clone().into())], |_, _| false, + None, ) .unwrap(); assert_eq!(Value::okay(Value::UInt(777)).unwrap(), res2.0); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 7124ce571b5..9fecb3bbc36 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -234,7 +234,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] 
epoch: Stac &contract_identifier, "token-transfer", &[p1.clone().into(), Value::UInt(210)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -247,7 +248,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(9000)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -261,7 +263,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "token-transfer", &[p2.clone().into(), Value::UInt(1001)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -269,7 +272,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac assert!(is_committed( & // send to self! block.as_transaction(|tx| tx.run_contract_call(&p1, None, &contract_identifier, "token-transfer", - &[p1.clone().into(), Value::UInt(1000)], |_, _| false)).unwrap().0 + &[p1.clone().into(), Value::UInt(1000)], |_, _| false, None)).unwrap().0 )); assert_eq!( @@ -299,7 +302,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -313,7 +317,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -327,7 +332,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -351,7 +357,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -388,7 +395,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "mint-after", &[Value::UInt(25)], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -402,7 +410,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "faucet", &[], - |_, _| false + |_, _| false, + None )) .unwrap() .0 @@ -425,7 +434,8 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac &contract_identifier, "my-get-token-balance", &[p1.clone().into()], - |_, _| false + |_, _| false, + None )) .unwrap() .0, diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 937c90ebdc8..8aa7309033e 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1104,6 +1104,7 @@ impl Config { }, miner_status, confirm_microblocks: false, + max_execution_time: miner_config.max_execution_time, } } @@ -1146,6 +1147,7 @@ impl Config { }, miner_status, confirm_microblocks: true, + max_execution_time: miner_config.max_execution_time, } } @@ -2177,6 +2179,8 @@ pub struct MinerConfig { pub tenure_extend_cost_threshold: u64, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections pub block_rejection_timeout_steps: HashMap, + + pub max_execution_time: Option, } impl Default for MinerConfig { @@ -2226,6 +2230,7 @@ impl Default for MinerConfig { rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); rejections_timeouts_default_map }, + max_execution_time: None, } } } @@ -2625,6 +2630,7 @@ pub struct MinerConfigFile { pub tenure_timeout_secs: Option, pub tenure_extend_cost_threshold: Option, pub block_rejection_timeout_steps: Option>, + 
pub max_execution_time: Option, } impl MinerConfigFile { @@ -2786,7 +2792,9 @@ impl MinerConfigFile { } else{ miner_default_config.block_rejection_timeout_steps } - } + }, + + max_execution_time: self.max_execution_time }) } } diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 7047eba6109..2d18191be46 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -549,6 +549,7 @@ impl NakamotoBlockProposal { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); let err = match tx_result { TransactionResult::Success(_) => Ok(()), diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 9347d8384bd..f561567d3cb 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -309,6 +309,7 @@ fn test_try_make_response() { tx.tx_len(), &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); let block = builder.mine_nakamoto_block(&mut tenure_tx); Ok(block) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 616ea8f81fb..02d35301ebd 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3097,6 +3097,7 @@ pub mod test { &boot_code_smart_contract, &boot_code_account, ASTRules::PrecheckSize, + None, ) .unwrap() }); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f1117be811e..0c36dc96db5 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3079,6 +3079,7 @@ fn block_proposal_api_endpoint() { tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize, + None, ); assert!( matches!(res, TransactionResult::Success(..)), From dd35d77247203638520b88622e27d1fff59f5bcb Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 5 Mar 2025 07:49:42 -0800 Subject: [PATCH 066/238] fix: flaky reload_miner_config test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ebd2bc5c4e5..a0ac4707312 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11113,6 +11113,8 @@ fn reload_miner_config() { // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); + let old_burn_fee_cap: u64 = 200000; + conf.burnchain.burn_fee_cap = 200000; conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000); conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); @@ -11193,7 +11195,7 @@ fn reload_miner_config() { .map(|r| r.get("amt").unwrap().as_u64().unwrap()) .sum::(); - assert_eq!(reward_amount, 200000); + assert_eq!(reward_amount, old_burn_fee_cap); next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); From 9e655a2a9e43c732b01bd00c118b8160043e11a8 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 11:17:34 -0500 Subject: [PATCH 067/238] test: verify that `BlockInfo` is backwards compatible --- stacks-signer/src/signerdb.rs | 73 +++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git 
a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 0ddb0b10620..5d166a3ecfb 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -2196,4 +2196,77 @@ mod tests { .unwrap() .is_none()); } + + /// BlockInfo without the `reject_reason` field for backwards compatibility testing + #[derive(Serialize, Deserialize, Debug, PartialEq)] + pub struct BlockInfoPrev { + /// The block we are considering + pub block: NakamotoBlock, + /// The burn block height at which the block was proposed + pub burn_block_height: u64, + /// The reward cycle the block belongs to + pub reward_cycle: u64, + /// Our vote on the block if we have one yet + pub vote: Option, + /// Whether the block contents are valid + pub valid: Option, + /// Whether this block is already being signed over + pub signed_over: bool, + /// Time at which the proposal was received by this signer (epoch time in seconds) + pub proposed_time: u64, + /// Time at which the proposal was signed by this signer (epoch time in seconds) + pub signed_self: Option, + /// Time at which the proposal was signed by a threshold in the signer set (epoch time in seconds) + pub signed_group: Option, + /// The block state relative to the signer's view of the stacks blockchain + pub state: BlockState, + /// Consumed processing time in milliseconds to validate this block + pub validation_time_ms: Option, + /// Extra data specific to v0, v1, etc. + pub ext: ExtraBlockInfo, + } + + /// Verify that we can deserialize the old BlockInfo struct into the new version + #[test] + fn deserialize_old_block_info() { + let block_info_prev = BlockInfoPrev { + block: NakamotoBlock { + header: NakamotoBlockHeader::genesis(), + txs: vec![], + }, + burn_block_height: 2, + reward_cycle: 3, + vote: None, + valid: None, + signed_over: true, + proposed_time: 4, + signed_self: None, + signed_group: None, + state: BlockState::Unprocessed, + validation_time_ms: Some(5), + ext: ExtraBlockInfo::default(), + }; + + let block_info: BlockInfo = + serde_json::from_value(serde_json::to_value(&block_info_prev).unwrap()).unwrap(); + assert_eq!(block_info.block, block_info_prev.block); + assert_eq!( + block_info.burn_block_height, + block_info_prev.burn_block_height + ); + assert_eq!(block_info.reward_cycle, block_info_prev.reward_cycle); + assert_eq!(block_info.vote, block_info_prev.vote); + assert_eq!(block_info.valid, block_info_prev.valid); + assert_eq!(block_info.signed_over, block_info_prev.signed_over); + assert_eq!(block_info.proposed_time, block_info_prev.proposed_time); + assert_eq!(block_info.signed_self, block_info_prev.signed_self); + assert_eq!(block_info.signed_group, block_info_prev.signed_group); + assert_eq!(block_info.state, block_info_prev.state); + assert_eq!( + block_info.validation_time_ms, + block_info_prev.validation_time_ms + ); + assert_eq!(block_info.ext, block_info_prev.ext); + assert!(block_info.reject_reason.is_none()); + } } From b9691b2010bf95925afe033a6dfe06e14680839e Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 5 Mar 2025 08:27:20 -0800 Subject: [PATCH 068/238] Removing microblock test --- .github/workflows/epoch-tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/epoch-tests.yml b/.github/workflows/epoch-tests.yml index be00618d505..bccedf7056e 100644 --- a/.github/workflows/epoch-tests.yml +++ b/.github/workflows/epoch-tests.yml @@ -29,7 +29,6 @@ jobs: max-parallel: 32 matrix: test-name: - - tests::epoch_205::bigger_microblock_streams_in_2_05 - 
tests::epoch_205::test_cost_limit_switch_version205 - tests::epoch_205::test_dynamic_db_method_costs - tests::epoch_205::test_exact_block_costs From 658e051ddf63313fe65b3c395a8a246d274bc4a3 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 11:31:05 -0500 Subject: [PATCH 069/238] refactor: improve flow deciding whether to reevaluate block --- stacks-signer/src/v0/signer.rs | 67 +++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 4525cc966d0..8e2932eaf54 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -559,38 +559,19 @@ impl Signer { // TODO: should add a check to ignore an old burn block height if we know its outdated. Would require us to store the burn block height we last saw on the side. // the signer needs to be able to determine whether or not the block they're about to sign would conflict with an already-signed Stacks block let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - if let Some(block_info) = self.block_lookup_by_reward_cycle(&signer_signature_hash) { - if should_reevaluate_block(&block_info) { - // Treat this case the same as if no block info was found + let prior_evaluation = self + .block_lookup_by_reward_cycle(&signer_signature_hash) + .and_then(|block_info| if should_reevaluate_block(&block_info) { + debug!("Received a proposal for this block before, but our rejection reason allows us to reconsider"; + "reject_reason" => ?block_info.reject_reason); + None } else { - let Some(block_response) = self.determine_response(&block_info) else { - // We are still waiting for a response for this block. Do nothing. - debug!( - "{self}: Received a block proposal for a block we are already validating."; - "signer_sighash" => %signer_signature_hash, - "block_id" => %block_proposal.block.block_id() - ); - return; - }; - - // Submit a proposal response to the .signers contract for miners - debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - - let accepted = matches!(block_response, BlockResponse::Accepted(..)); - if let Err(e) = self - .stackerdb - .send_message_with_retry::(block_response.into()) - { - warn!("{self}: Failed to send block response to stacker-db: {e:?}"); - } else { - crate::monitoring::actions::increment_block_responses_sent(accepted); - crate::monitoring::actions::record_block_response_latency( - &block_proposal.block, - ); - } + Some(block_info) + }); - return; - } + // we previously considered this proposal, handle the status here + if let Some(block_info) = prior_evaluation { + return self.handle_prior_proposal_eval(&block_info); } info!( @@ -675,6 +656,32 @@ impl Signer { } } + fn handle_prior_proposal_eval(&mut self, block_info: &BlockInfo) { + let Some(block_response) = self.determine_response(&block_info) else { + // We are still waiting for a response for this block. Do nothing. 
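// [editor's note] A standalone sketch of the `let ... else` early-return idiom
// this refactor relies on: bind the `Some` case, or bail out of the function
// on `None`. The names here are illustrative, not the signer API:
//
//     fn respond_or_wait(prior_response: Option<String>) {
//         let Some(response) = prior_response else {
//             // Still waiting on validation; nothing to broadcast yet.
//             return;
//         };
//         println!("broadcasting {response}");
//     }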
+ debug!( + "{self}: Received a block proposal for a block we are already validating."; + "signer_sighash" => %block_info.signer_signature_hash(), + "block_id" => %block_info.block.block_id() + ); + return; + }; + + // Submit a proposal response to the .signers contract for miners + debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); + + let accepted = matches!(block_response, BlockResponse::Accepted(..)); + if let Err(e) = self + .stackerdb + .send_message_with_retry::(block_response.into()) + { + warn!("{self}: Failed to send block response to stacker-db: {e:?}"); + } else { + crate::monitoring::actions::increment_block_responses_sent(accepted); + crate::monitoring::actions::record_block_response_latency(&block_info.block); + } + } + /// Handle block response messages from a signer fn handle_block_response( &mut self, From c2f79e9e4187ad44cd273093c61946acbff4eb4a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 5 Mar 2025 08:35:45 -0800 Subject: [PATCH 070/238] fix: handle burn amount in assertion --- .../src/tests/nakamoto_integrations.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a0ac4707312..263d29c78be 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11113,8 +11113,8 @@ fn reload_miner_config() { // setup sender + recipient for some test stx transfers // these are necessary for the interim blocks to get mined at all let sender_addr = tests::to_addr(&sender_sk); - let old_burn_fee_cap: u64 = 200000; - conf.burnchain.burn_fee_cap = 200000; + let old_burn_fee_cap: u64 = 100000; + conf.burnchain.burn_fee_cap = old_burn_fee_cap; conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000); conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); @@ -11149,8 +11149,6 @@ fn reload_miner_config() { file.write_all(new_config.as_bytes()).unwrap(); }; - update_config(100000, 50); - let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); let run_loop_stopper = run_loop.get_termination_switch(); let counters = run_loop.counters(); @@ -11180,6 +11178,8 @@ fn reload_miner_config() { wait_for_first_naka_block_commit(60, &commits_submitted); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let burn_blocks = test_observer::get_burn_blocks(); @@ -11195,7 +11195,9 @@ fn reload_miner_config() { .map(|r| r.get("amt").unwrap().as_u64().unwrap()) .sum::(); - assert_eq!(reward_amount, old_burn_fee_cap); + let burn_amount = burn_block.get("burn_amount").unwrap().as_u64().unwrap(); + + assert_eq!(reward_amount + burn_amount, old_burn_fee_cap); next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); @@ -11221,7 +11223,9 @@ fn reload_miner_config() { .map(|r| r.get("amt").unwrap().as_u64().unwrap()) .sum::(); - assert_eq!(reward_amount, new_amount); + let burn_amount = burn_block.get("burn_amount").unwrap().as_u64().unwrap(); + + assert_eq!(reward_amount + burn_amount, new_amount); coord_channel .lock() From 94c2a36482028da31f26354e888f6c574d596b46 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 11:37:19 -0500 
Subject: [PATCH 071/238] chore: fix clippy error --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 8e2932eaf54..2decd05dc16 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -657,7 +657,7 @@ impl Signer { } fn handle_prior_proposal_eval(&mut self, block_info: &BlockInfo) { - let Some(block_response) = self.determine_response(&block_info) else { + let Some(block_response) = self.determine_response(block_info) else { // We are still waiting for a response for this block. Do nothing. From 59534745e4605cee9bdb1e3ef27346f567490cde Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 5 Mar 2025 17:55:09 +0100 Subject: [PATCH 072/238] add fee information to transaction logs that end with success or skipped, #5601 --- stackslib/src/chainstate/stacks/miner.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 819de80a333..d269ea27435 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -422,6 +422,7 @@ impl TransactionResult { "event_name" => %"transaction_result", "tx_id" => %tx.txid(), "event_type" => %"success", + "fee" => tx.get_tx_fee() ); } @@ -445,6 +446,7 @@ impl TransactionResult { "tx_id" => %tx.txid(), "event_type" => "skip", "reason" => %err, + "fee" => tx.get_tx_fee() ); } From 15eec8d8a1316242211c239f5547bebc39e9b5cb Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Wed, 5 Mar 2025 18:05:27 +0100 Subject: [PATCH 073/238] update changelog for #5601 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9545caf061f..2d950419f07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added +- Add fee information to transaction logs ending with "success" or "skipped" while building a new block + ### Changed - When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) From c231380788f6a8a8ad46307bb816b1a605cde4bf Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 13:51:43 -0500 Subject: [PATCH 074/238] feat: use .rustfmt.toml for formatting options We previously did this with the `cargo fmt-stacks` command because these config options were not supported in .rustfmt.toml, but they are now supported by the nightly build. Using this directly is better than the alias because we do not need to remember to run the alias.
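[Editor's note] For context on the two options moving into .rustfmt.toml: `group_imports = "StdExternalCrate"` sorts `use` statements into three blocks (std first, then external crates, then this crate), and `imports_granularity = "Module"` merges item imports so that each module gets a single `use`. A sketch of the resulting layout; these import paths are illustrative, not taken from the diff below:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    use serde_json::Value;

    use crate::chainstate::stacks::miner::TransactionResult;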
--- .cargo/config.toml | 1 - .rustfmt.toml | 2 ++ .vscode/settings.json | 10 +++++++--- 3 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 .rustfmt.toml diff --git a/.cargo/config.toml b/.cargo/config.toml index feaf5fec86d..3c6d5f7019e 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,6 +1,5 @@ [alias] stacks-node = "run --package stacks-node --" -fmt-stacks = "fmt -- --config group_imports=StdExternalCrate,imports_granularity=Module" clippy-stacks = "clippy -p libstackerdb -p stacks-signer -p pox-locking -p clarity -p libsigner -p stacks-common --no-deps --tests --all-features -- -D warnings" # Uncomment to improve performance slightly, at the cost of portability diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 00000000000..64d94def266 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,2 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Module" diff --git a/.vscode/settings.json b/.vscode/settings.json index ab8db95f5d9..a52a694e333 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,11 @@ { - "lldb.adapterType": "native", - "lldb.launch.sourceLanguages": ["rust"], + "lldb.launch.sourceLanguages": [ + "rust" + ], "rust-analyzer.runnables.extraEnv": { "BITCOIND_TEST": "1" - } + }, + "rust-analyzer.rustfmt.extraArgs": [ + "+nightly" + ] } \ No newline at end of file From e7d64792cec11942123dad4526b0d9f888c41162 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 14:00:38 -0500 Subject: [PATCH 075/238] fix: update `Rust Format` CI job --- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e32148c06fc..d82dad02d23 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,8 +47,6 @@ jobs: - name: Rustfmt id: rustfmt uses: stacks-network/actions/rustfmt@main - with: - alias: "fmt-stacks" ###################################################################################### ## Check if the branch that this workflow is being run against is a release branch From 7ae498f3ae1cc186d1e5d34b31e35f6dd1023e51 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 5 Mar 2025 14:10:47 -0500 Subject: [PATCH 076/238] ci: try passing `+nightly` in alias for Rustfmt job --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d82dad02d23..1aedf269e7c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,6 +47,8 @@ jobs: - name: Rustfmt id: rustfmt uses: stacks-network/actions/rustfmt@main + with: + alias: "+nightly fmt" ###################################################################################### ## Check if the branch that this workflow is being run against is a release branch From 13341de500f59f61a1788e5d0c971adb90c7ee9e Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 5 Mar 2025 15:04:56 -0800 Subject: [PATCH 077/238] fix: compare origin when sending pending payload --- testnet/stacks-node/src/event_dispatcher.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 4b16ea61e84..45f563238ef 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -438,8 +438,17 @@ impl EventObserver { }; for (id, url, payload, timeout_ms) in pending_payloads { + let full_url = Url::parse(url.as_str()) + .unwrap_or_else(|_| panic!("Event 
dispatcher: unable to parse {url} as a URL")); + let endpoint_url = Url::parse(format!("http://{}", &self.endpoint).as_str()) + .unwrap_or_else(|_| { + panic!( + "Event dispatcher: unable to parse {} as a URL", + &self.endpoint + ) + }); // If the URL is not the same as the endpoint, skip it - if !url.starts_with(&self.endpoint) { + if full_url.origin() != endpoint_url.origin() { continue; } let timeout = Duration::from_millis(timeout_ms); @@ -2049,7 +2058,7 @@ mod test { let db_path = dir.path().join("event_observers.sqlite"); let db_path_str = db_path.to_str().unwrap(); let mut server = mockito::Server::new(); - let endpoint = server.url().to_string(); + let endpoint = server.host_with_port(); let timeout = Duration::from_secs(5); let observer = EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); @@ -2093,7 +2102,7 @@ mod test { let db_path_str = db_path.to_str().unwrap(); let mut server = mockito::Server::new(); - let endpoint = server.url().to_string(); + let endpoint = server.host_with_port(); let timeout = Duration::from_secs(5); let observer = EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); From d718960525ffaf85b077bed7a9ef0037a2a62c7e Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 6 Mar 2025 08:43:23 +0100 Subject: [PATCH 078/238] remove dot character at end of transaction success log primary message, #5601 --- stackslib/src/chainstate/stacks/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d269ea27435..82dfb53fd80 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -418,7 +418,7 @@ pub enum TransactionEvent { impl TransactionResult { /// Logs a queryable message for the case where `txid` has succeeded. pub fn log_transaction_success(tx: &StacksTransaction) { - info!("Tx successfully processed."; + info!("Tx successfully processed"; "event_name" => %"transaction_result", "tx_id" => %tx.txid(), "event_type" => %"success", From 8ce210b02e922a4b1a4622027e401c10cf26b0a9 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 6 Mar 2025 11:12:25 +0100 Subject: [PATCH 079/238] refactor: removed fee argument from TransactionResult::success method and updated related tests, #5900 --- stackslib/src/chainstate/stacks/miner.rs | 9 ++++----- .../chainstate/stacks/tests/block_construction.rs | 1 - stackslib/src/core/tests/mod.rs | 14 -------------- 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 819de80a333..6e9da740a8a 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -463,13 +463,12 @@ impl TransactionResult { /// This method logs "transaction success" as a side effect.
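// [editor's note] The substance of this refactor: the fee is no longer a
// separate caller-supplied argument; it is derived from the transaction
// itself via `transaction.get_tx_fee()` (see the hunk below), so a caller can
// no longer pass a fee that disagrees with the transaction it accompanies.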
pub fn success( transaction: &StacksTransaction, - fee: u64, receipt: StacksTransactionReceipt, ) -> TransactionResult { Self::log_transaction_success(transaction); Self::Success(TransactionSuccess { tx: transaction.clone(), - fee, + fee: transaction.get_tx_fee(), receipt, soft_limit_reached: false, }) @@ -1054,7 +1053,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let quiet = !cfg!(test); match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules) { - Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, fee, receipt)), + Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, receipt)), Err(e) => { let (is_problematic, e) = TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); @@ -2858,7 +2857,7 @@ impl BlockBuilder for StacksBlockBuilder { self.txs.push(tx.clone()); self.total_anchored_fees += fee; - TransactionResult::success(tx, fee, receipt) + TransactionResult::success(tx, receipt) } else { // building up the microblocks if tx.anchor_mode != TransactionAnchorMode::OffChainOnly @@ -2948,7 +2947,7 @@ impl BlockBuilder for StacksBlockBuilder { self.micro_txs.push(tx.clone()); self.total_streamed_fees += fee; - TransactionResult::success(tx, fee, receipt) + TransactionResult::success(tx, receipt) }; self.bytes_so_far += tx_len; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 0e70321784c..2008d8c4cd6 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -4999,7 +4999,6 @@ fn paramaterized_mempool_walk_test( // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index ec5fcf0ec7c..092b019b394 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -278,7 +278,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -316,7 +315,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -353,7 +351,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -395,7 +392,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -435,7 +431,6 @@ fn mempool_walk_over_fork() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -661,7 +656,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -698,7 
+692,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -735,7 +728,6 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -844,7 +836,6 @@ fn test_iterate_candidates_skipped_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -959,7 +950,6 @@ fn test_iterate_candidates_processing_error_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -1074,7 +1064,6 @@ fn test_iterate_candidates_problematic_transaction() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -1226,7 +1215,6 @@ fn test_iterate_candidates_concurrent_write_lock() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -2725,7 +2713,6 @@ fn test_filter_txs_by_type() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], @@ -2760,7 +2747,6 @@ fn test_filter_txs_by_type() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], From 157cf3761c916c046168f8911b63cbd9e6109939 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 6 Mar 2025 11:48:28 +0100 Subject: [PATCH 080/238] refactor: mark fee variable as unused, #5900 --- stackslib/src/chainstate/stacks/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 6e9da740a8a..d61bb4b7d48 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1053,7 +1053,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let quiet = !cfg!(test); match StacksChainState::process_transaction(clarity_tx, &tx, quiet, ast_rules) { - Ok((fee, receipt)) => Ok(TransactionResult::success(&tx, receipt)), + Ok((_fee, receipt)) => Ok(TransactionResult::success(&tx, receipt)), From 68ebdde787b6eb5e54eaed0f6b8b65fe26622eb6 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 6 Mar 2025 11:56:10 +0100 Subject: [PATCH 081/238] refactor: remove fee argument from TransactionResult::success_with_soft_limit, #5900 --- stackslib/src/chainstate/nakamoto/miner.rs | 4 ++-- stackslib/src/chainstate/stacks/miner.rs | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs
b/stackslib/src/chainstate/nakamoto/miner.rs index d9ad1319f71..6b5d90d7954 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -737,7 +737,7 @@ impl BlockBuilder for NakamotoBlockBuilder { } let cost_before = clarity_tx.cost_so_far(); - let (fee, receipt) = + let (_fee, receipt) = match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules) { Ok(x) => x, Err(e) => { @@ -764,7 +764,7 @@ impl BlockBuilder for NakamotoBlockBuilder { // save self.txs.push(tx.clone()); - TransactionResult::success_with_soft_limit(tx, fee, receipt, soft_limit_reached) + TransactionResult::success_with_soft_limit(tx, receipt, soft_limit_reached) }; self.bytes_so_far += tx_len; diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d61bb4b7d48..207bad16bbf 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -478,14 +478,13 @@ impl TransactionResult { /// This method logs "transaction success" as a side effect. pub fn success_with_soft_limit( transaction: &StacksTransaction, - fee: u64, receipt: StacksTransactionReceipt, soft_limit_reached: bool, ) -> TransactionResult { Self::log_transaction_success(transaction); Self::Success(TransactionSuccess { tx: transaction.clone(), - fee, + fee: transaction.get_tx_fee(), receipt, soft_limit_reached, }) From e744b1e4ba9931a7e90355e4c270db76f7d9afa4 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Thu, 6 Mar 2025 13:25:42 +0100 Subject: [PATCH 082/238] refactor: make conditional compilation based on target instead of feature --- clarity/Cargo.toml | 11 ++++++----- clarity/src/vm/analysis/mod.rs | 4 ++-- clarity/src/vm/database/clarity_store.rs | 8 ++++---- clarity/src/vm/database/mod.rs | 6 +++--- clarity/src/vm/docs/contracts.rs | 12 ++++++------ clarity/src/vm/errors.rs | 6 +++--- clarity/src/vm/mod.rs | 2 +- stacks-common/Cargo.toml | 9 +++++---- stacks-common/src/bitvec.rs | 6 +++--- stacks-common/src/types/mod.rs | 2 +- stacks-common/src/util/macros.rs | 2 +- 11 files changed, 35 insertions(+), 33 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 284e856e498..61636107e64 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -27,11 +27,10 @@ regex = "1" lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common", optional = true, default-features = false } +stacks_common = { package = "stacks-common", path = "../stacks-common" } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } -rusqlite = { workspace = true, optional = true} [dependencies.serde_json] version = "1.0" @@ -41,6 +40,9 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.2.23" features = ["std"] +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rusqlite = { workspace = true } + [dev-dependencies] assert-json-diff = "1.0.0" mutants = "0.0.3" @@ -49,11 +51,10 @@ mutants = "0.0.3" # criterion = "0.3" [features] -default = ["canonical"] -canonical = ["rusqlite", "stacks_common/canonical"] +default = [] developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] -testing = ["canonical"] +testing = [] devtools = [] rollback_value_check = [] disable-costs = [] diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 8dde917df9f..5f4b1af2b87 100644 
--- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -36,7 +36,7 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use crate::vm::database::MemoryBackingStore; use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; @@ -44,7 +44,7 @@ use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used by CLI tools like the docs generator. Not used in production -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index a37669f499b..679a7e001d8 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use rusqlite::Connection; use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use crate::vm::database::{ ClarityDatabase, ClarityDeserializable, ClaritySerializable, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; @@ -85,7 +85,7 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; - #[cfg(feature = "canonical")] + #[cfg(not(target_arch = "wasm32"))] fn get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -222,7 +222,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } - #[cfg(feature = "canonical")] + #[cfg(not(target_arch = "wasm32"))] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") } diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index 65236cd88a1..d867c148e6b 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
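The substitution here is mechanical and repeats across every file in this patch: each `#[cfg(feature = "canonical")]` guard around sqlite-backed code becomes a target check, so native builds always compile the rusqlite plumbing and wasm builds always omit it, independent of which Cargo features downstream crates happen to unify. A minimal sketch of the before/after shape (hypothetical helper, not taken from this diff):

    // Before: compiled only when some crate in the graph enables `canonical`.
    #[cfg(feature = "canonical")]
    fn open_store(path: &str) -> rusqlite::Result<rusqlite::Connection> {
        rusqlite::Connection::open(path)
    }

    // After: compiled on every non-wasm target, with no feature flag involved.
    #[cfg(not(target_arch = "wasm32"))]
    fn open_store(path: &str) -> rusqlite::Result<rusqlite::Connection> {
        rusqlite::Connection::open(path)
    }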
-#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub use sqlite::MemoryBackingStore; pub use self::clarity_db::{ @@ -22,7 +22,7 @@ pub use self::clarity_db::{ }; pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler}; pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog}; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub use self::sqlite::SqliteConnection; pub use self::structures::{ ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata, @@ -32,6 +32,6 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub mod sqlite; mod structures; diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 70c1b3ecb25..165ed0cb808 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -4,13 +4,13 @@ use hashbrown::{HashMap, HashSet}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use crate::vm::analysis::mem_type_check; use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use crate::vm::database::MemoryBackingStore; use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature}; use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value}; @@ -63,7 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] #[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); @@ -72,7 +72,7 @@ fn get_constant_value(var_name: &str, contract_content: &str) -> Value { .expect("BUG: failed to return constant value") } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] fn doc_execute(program: &str) -> Result, vm::Error> { let contract_id = QualifiedContractIdentifier::transient(); let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2); @@ -99,7 +99,7 @@ fn doc_execute(program: &str) -> Result, vm::Error> { }) } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] #[allow(clippy::expect_used)] pub fn make_docs( content: &str, @@ -185,7 +185,7 @@ pub fn make_docs( /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs, /// and a map from `contract_name` to corresponding `ContractSupportDocs` -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub fn produce_docs_refs, B: AsRef>( contracts: &[(A, B)], support_docs: &HashMap<&str, ContractSupportDocs>, diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index 5f2b93c1e5f..0200fb641d8 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -16,7 +16,7 @@ use std::{error, fmt}; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; @@ -57,7 +57,7 @@ pub enum InterpreterError { UninitializedPersistedVariable, FailedToConstructAssetTable, 
FailedToConstructEventBatch, - #[cfg(feature = "canonical")] + #[cfg(not(target_arch = "wasm32"))] SqliteError(IncomparableError), BadFileName, FailedToCreateDataDirectory, @@ -241,7 +241,7 @@ mod test { fn error_formats() { let t = "(/ 10 0)"; let expected = "DivisionByZero - Stack Trace: + Stack Trace: _native_:native_div "; diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 82c9b5a4db7..2c53d95f1a8 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -42,7 +42,7 @@ pub mod coverage; pub mod events; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub mod tooling; #[cfg(any(test, feature = "testing"))] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 491a6863507..95ab9414e95 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -31,7 +31,6 @@ slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" hashbrown = { workspace = true } -rusqlite = { workspace = true, optional = true } [target.'cfg(unix)'.dependencies] nix = "0.23" @@ -61,15 +60,17 @@ features = ["serde"] version = "0.2.23" features = ["std"] +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rusqlite = { workspace = true } + [dev-dependencies] rand_core = { workspace = true } [features] -default = ["canonical", "developer-mode"] -canonical = ["rusqlite"] +default = ["developer-mode"] developer-mode = [] slog_json = ["slog-json"] -testing = ["canonical"] +testing = [] serde = [] bech32_std = [] bech32_strict = [] diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 065dd5e8141..b78a969f454 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; @@ -106,7 +106,7 @@ impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { } } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] impl FromSql for BitVec { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?; @@ -115,7 +115,7 @@ impl FromSql for BitVec { } } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] impl ToSql for BitVec { fn to_sql(&self) -> rusqlite::Result> { let hex = bytes_to_hex(self.serialize_to_vec().as_slice()); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 3cb4a94facb..fc8a629850b 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -19,7 +19,7 @@ use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::sync::LazyLock; -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] pub mod sqlite; use crate::address::c32::{c32_address, c32_address_decode}; diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 4e332179e6e..5c2ec5dcad3 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -707,7 +707,7 @@ macro_rules! fmax { }} } -#[cfg(feature = "canonical")] +#[cfg(not(target_arch = "wasm32"))] macro_rules! 
impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { From 5be97d35ee157a7731c3ca3f0d1786b9be6275d7 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Thu, 6 Mar 2025 16:09:10 +0100 Subject: [PATCH 083/238] refactor: use target_family instead of target_arch --- clarity/Cargo.toml | 2 +- clarity/src/vm/analysis/mod.rs | 4 ++-- clarity/src/vm/database/clarity_store.rs | 8 ++++---- clarity/src/vm/database/mod.rs | 6 +++--- clarity/src/vm/docs/contracts.rs | 12 ++++++------ clarity/src/vm/errors.rs | 4 ++-- clarity/src/vm/mod.rs | 2 +- stacks-common/Cargo.toml | 2 +- stacks-common/src/bitvec.rs | 6 +++--- stacks-common/src/types/mod.rs | 2 +- stacks-common/src/util/macros.rs | 2 +- 11 files changed, 25 insertions(+), 25 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 61636107e64..fb0a191d6f7 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -40,7 +40,7 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.2.23" features = ["std"] -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(not(target_family = "wasm"))'.dependencies] rusqlite = { workspace = true } [dev-dependencies] diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 5f4b1af2b87..10717100e2e 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -36,7 +36,7 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use crate::vm::database::MemoryBackingStore; use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; @@ -44,7 +44,7 @@ use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used by CLI tools like the docs generator. Not used in production -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index 679a7e001d8..3d1e4c975ec 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
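The swap to `target_family` broadens the same gate: `target_family = "wasm"` matches `wasm32-unknown-unknown`, the wasi targets, and any future `wasm64` variant, while `target_arch = "wasm32"` names only the 32-bit ones. A tiny illustrative program (not part of this diff) shows which branch a given toolchain compiles:

    fn main() {
        #[cfg(target_family = "wasm")]
        println!("wasm target: sqlite-backed code is compiled out");

        #[cfg(not(target_family = "wasm"))]
        println!("native target: rusqlite-backed stores are available");
    }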
-#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use rusqlite::Connection; use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use crate::vm::database::{ ClarityDatabase, ClarityDeserializable, ClaritySerializable, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; @@ -85,7 +85,7 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(target_family = "wasm"))] fn get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -222,7 +222,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(target_family = "wasm"))] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") } diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index d867c148e6b..2ebeb00dfd7 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub use sqlite::MemoryBackingStore; pub use self::clarity_db::{ @@ -22,7 +22,7 @@ pub use self::clarity_db::{ }; pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler}; pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog}; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub use self::sqlite::SqliteConnection; pub use self::structures::{ ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata, @@ -32,6 +32,6 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub mod sqlite; mod structures; diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 165ed0cb808..230bb2a2fdf 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -4,13 +4,13 @@ use hashbrown::{HashMap, HashSet}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use crate::vm::analysis::mem_type_check; use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use crate::vm::database::MemoryBackingStore; use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature}; use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value}; @@ -63,7 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] #[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); @@ -72,7 +72,7 @@ fn get_constant_value(var_name: &str, contract_content: 
&str) -> Value { .expect("BUG: failed to return constant value") } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] fn doc_execute(program: &str) -> Result, vm::Error> { let contract_id = QualifiedContractIdentifier::transient(); let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2); @@ -99,7 +99,7 @@ fn doc_execute(program: &str) -> Result, vm::Error> { }) } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] #[allow(clippy::expect_used)] pub fn make_docs( content: &str, @@ -185,7 +185,7 @@ pub fn make_docs( /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs, /// and a map from `contract_name` to corresponding `ContractSupportDocs` -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub fn produce_docs_refs, B: AsRef>( contracts: &[(A, B)], support_docs: &HashMap<&str, ContractSupportDocs>, diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index 0200fb641d8..69b623ea388 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -16,7 +16,7 @@ use std::{error, fmt}; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; @@ -57,7 +57,7 @@ pub enum InterpreterError { UninitializedPersistedVariable, FailedToConstructAssetTable, FailedToConstructEventBatch, - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(target_family = "wasm"))] SqliteError(IncomparableError), BadFileName, FailedToCreateDataDirectory, diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 2c53d95f1a8..9ab3f3a5f45 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -42,7 +42,7 @@ pub mod coverage; pub mod events; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub mod tooling; #[cfg(any(test, feature = "testing"))] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 95ab9414e95..441af02d580 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -60,7 +60,7 @@ features = ["serde"] version = "0.2.23" features = ["std"] -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(not(target_family = "wasm"))'.dependencies] rusqlite = { workspace = true } [dev-dependencies] diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index b78a969f454..b5f6c3b26d9 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
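The `FromSql`/`ToSql` impls gated just below store a `BitVec` as a hex-encoded text column. The same round-trip works for any byte-serializable type; a standalone sketch of the decoding half (illustrative only, assuming an ASCII hex string of even length):

    use rusqlite::types::{FromSqlError, FromSqlResult, ValueRef};

    fn decode_hex_column(value: ValueRef<'_>) -> FromSqlResult<Vec<u8>> {
        // The column is TEXT holding hex digits, as in the BitVec impl below.
        let s = value.as_str()?;
        if s.len() % 2 != 0 {
            return Err(FromSqlError::InvalidType);
        }
        (0..s.len())
            .step_by(2)
            .map(|i| u8::from_str_radix(&s[i..i + 2], 16))
            .collect::<Result<Vec<u8>, _>>()
            .map_err(|e| FromSqlError::Other(Box::new(e)))
    }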
-#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; @@ -106,7 +106,7 @@ impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { } } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] impl FromSql for BitVec { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?; @@ -115,7 +115,7 @@ impl FromSql for BitVec { } } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] impl ToSql for BitVec { fn to_sql(&self) -> rusqlite::Result> { let hex = bytes_to_hex(self.serialize_to_vec().as_slice()); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index fc8a629850b..94ca15b7381 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -19,7 +19,7 @@ use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::sync::LazyLock; -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] pub mod sqlite; use crate::address::c32::{c32_address, c32_address_decode}; diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 5c2ec5dcad3..563babf905e 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -707,7 +707,7 @@ macro_rules! fmax { }} } -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(target_family = "wasm"))] macro_rules! impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { From a9e988f643c69a7da97bb43c7a9166b7944cd110 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 6 Mar 2025 15:38:08 -0600 Subject: [PATCH 084/238] fix: http endpoints should tolerate duplicate headers #5903 --- .../net/api/tests/get_tenures_fork_info.rs | 1 + stackslib/src/net/api/tests/getsigner.rs | 1 + stackslib/src/net/api/tests/getsortition.rs | 1 + stackslib/src/net/http/request.rs | 35 +++++++++--- stackslib/src/net/http/tests.rs | 56 ++++++++++++++++++- 5 files changed, 86 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/api/tests/get_tenures_fork_info.rs b/stackslib/src/net/api/tests/get_tenures_fork_info.rs index 2b5abcfb362..360f75f0fc6 100644 --- a/stackslib/src/net/api/tests/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/tests/get_tenures_fork_info.rs @@ -37,6 +37,7 @@ fn make_preamble(start: &T, stop: &R) -> HttpRequestPrea content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index 381706c50e7..612a478517c 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -41,6 +41,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/api/tests/getsortition.rs b/stackslib/src/net/api/tests/getsortition.rs index 5a8e9ae034b..a961f43f581 100644 --- a/stackslib/src/net/api/tests/getsortition.rs +++ b/stackslib/src/net/api/tests/getsortition.rs @@ -40,6 +40,7 @@ fn make_preamble(query: &str) -> HttpRequestPreamble { content_length: Some(0), keep_alive: false, headers: BTreeMap::new(), + set_cookie: Vec::new(), } } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index 
8ccb2141462..f875d61f69e 100644
--- a/stackslib/src/net/http/request.rs
+++ b/stackslib/src/net/http/request.rs
@@ -14,7 +14,9 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+use std::collections::btree_map::Entry;
 use std::collections::{BTreeMap, HashMap, HashSet};
+use std::fmt::Display;
 use std::io::{Read, Write};

 use percent_encoding::percent_decode_str;
@@ -54,6 +56,8 @@ pub struct HttpRequestPreamble {
     pub keep_alive: bool,
     /// Other headers that were not consumed in parsing
     pub headers: BTreeMap<String, String>,
+    /// `Set-Cookie` headers
+    pub set_cookie: Vec<String>,
 }

 impl HttpRequestPreamble {
@@ -74,6 +78,7 @@ impl HttpRequestPreamble {
             content_length: None,
             keep_alive,
             headers: BTreeMap::new(),
+            set_cookie: vec![],
         }
     }

@@ -105,6 +110,7 @@ impl HttpRequestPreamble {
             content_length: None,
             keep_alive: true,
             headers: BTreeMap::new(),
+            set_cookie: vec![],
         }
     }

@@ -187,10 +193,10 @@ impl HttpRequestPreamble {
                 return Some(format!("{}", &self.host));
             }
             "content-type" => {
-                return self.content_type.clone().map(|ct| format!("{}", &ct));
+                return self.content_type.as_ref().map(HttpContentType::to_string);
             }
             "content-length" => {
-                return self.content_length.clone().map(|cl| format!("{}", &cl));
+                return self.content_length.as_ref().map(u32::to_string);
             }
             _ => {
                 return self.headers.get(&hdr).cloned();
             }
@@ -371,9 +377,10 @@ impl StacksMessageCodec for HttpRequestPreamble {

         let mut headers: BTreeMap<String, String> = BTreeMap::new();
         let mut seen_headers: HashSet<String> = HashSet::new();
+        let mut set_cookie = vec![];

-        for i in 0..req.headers.len() {
-            let value = String::from_utf8(req.headers[i].value.to_vec()).map_err(|_e| {
+        for req_header in req.headers.iter() {
+            let value = String::from_utf8(req_header.value.to_vec()).map_err(|_e| {
                 CodecError::DeserializeError(
                     "Invalid HTTP header value: not utf-8".to_string(),
                 )
@@ -389,7 +396,7 @@ impl StacksMessageCodec for HttpRequestPreamble {
                 ));
             }

-            let key = req.headers[i].name.to_string().to_lowercase();
+            let key = req_header.name.to_lowercase();

             if seen_headers.contains(&key) {
                 return Err(CodecError::DeserializeError(format!(
@@ -397,23 +404,25 @@ impl StacksMessageCodec for HttpRequestPreamble {
                     key
                 )));
             }
-            seen_headers.insert(key.clone());

             if key == "host" {
                 peerhost = match value.parse::<PeerHost>() {
                     Ok(ph) => Some(ph),
                     Err(_) => None,
                 };
+                seen_headers.insert(key);
             } else if key == "content-type" {
                 // parse
                 let ctype = value.to_lowercase().parse::<HttpContentType>()?;
                 content_type = Some(ctype);
+                seen_headers.insert(key);
             } else if key == "content-length" {
                 // parse
                 content_length = match value.parse::<u32>() {
                     Ok(len) => Some(len),
                     Err(_) => None,
                 };
+                seen_headers.insert(key);
             } else if key == "connection" {
                 // parse
                 if value.to_lowercase() == "close" {
@@ -425,8 +434,19 @@
                         "Inavlid HTTP request: invalid Connection: header".to_string(),
                     ));
                 }
+                seen_headers.insert(key);
+            } else if key == "set-cookie" {
+                set_cookie.push(value);
             } else {
-                headers.insert(key, value);
+                match headers.entry(key) {
+                    Entry::Vacant(vacant_entry) => {
+                        vacant_entry.insert(value);
+                    }
+                    Entry::Occupied(mut occupied_entry) => {
+                        occupied_entry.get_mut().push_str(", ");
+                        occupied_entry.get_mut().push_str(&value);
+                    }
+                }
             }
         }

@@ -445,6 +465,7 @@ impl StacksMessageCodec for HttpRequestPreamble {
             content_length,
             keep_alive,
             headers,
+            set_cookie,
         })
     }
 }
diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs
index 55747e18fa9..3952187a4f9 100644
--- a/stackslib/src/net/http/tests.rs
+++
b/stackslib/src/net/http/tests.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use stacks_common::codec::StacksMessageCodec; +use std::collections::BTreeMap; + +use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::net::{PeerAddress, PeerHost}; use crate::net::http::common::{HTTP_PREAMBLE_MAX_ENCODED_SIZE, HTTP_PREAMBLE_MAX_NUM_HEADERS}; @@ -78,6 +80,58 @@ fn test_parse_reserved_header() { } } +#[test] +fn parse_http_request_duplicate_headers() { + let tests = vec![ + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nCache-Control: no-cache\r\ncache-control: no-store\r\nConnection: close\r\n\r\n", + Ok(BTreeMap::from([("cache-control".to_string(), "no-cache, no-store".to_string())]))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nCache-Control: no-store\r\ncache-control: no-cache\r\nConnection: close\r\n\r\n", + Ok(BTreeMap::from([("cache-control".into(), "no-store, no-cache".into())]))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nHost: core2.blockstack.org\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"host\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\nConnection: keep-alive\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"connection\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nContent-Type: application/json\r\nContent-Type: application/json\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"content-type\"".into()))), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nContent-Length: 10\r\nContent-length: 5\r\nConnection: close\r\n\r\n", + Err(CodecError::DeserializeError("Invalid HTTP request: duplicate header \"content-length\"".into()))), + ]; + + for (data, expected) in tests.into_iter() { + let result = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()); + match result { + Ok(req) => { + let expected = expected.unwrap(); + assert_eq!(req.headers, expected); + } + Err(e) => { + let expected = expected.unwrap_err(); + assert_eq!(format!("{expected:?}"), format!("{e:?}")); + } + } + } +} + +#[test] +fn parse_http_request_set_cookie() { + let tests = vec![ + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nConnection: close\r\n\r\n", + vec![]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a1\r\nSet-Cookie: a2\r\nConnection: close\r\n\r\n", + vec!["a1".to_string(), "a2".to_string()]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a2\r\nSet-Cookie: a1\r\nConnection: close\r\n\r\n", + vec!["a2".to_string(), "a1".to_string()]), + ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nset-Cookie: a1\r\nConnection: close\r\n\r\n", + vec!["a1".to_string()]), + ]; + + for (data, expected) in tests.into_iter() { + let req = HttpRequestPreamble::consensus_deserialize(&mut data.as_bytes()) + .expect("Should be able to parse the set-cookie requests"); + assert_eq!(req.set_cookie, expected); + } +} + #[test] fn test_parse_http_request_preamble_ok() { let tests = vec![ From f251a4a2fe09f63de8085749039750791241af5f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 6 Mar 2025 17:14:52 -0500 Subject: [PATCH 085/238] fix: remove the `candidate_cache` during mempool iteration Now that the db query is taking the nonces into account, it no longer makes sense to cache these 
candidate transactions. On the next query, their nonces should be properly set in the db, so that will get ordered correctly. This cache would only make performance worse in that case. --- stackslib/src/core/mempool.rs | 103 ++++++++++++++-------------------- 1 file changed, 41 insertions(+), 62 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index b32c6637dc2..2d4371e1d85 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1562,7 +1562,6 @@ impl MemPoolDB { debug!("Mempool walk for {}ms", settings.max_walk_time_ms,); - let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); // == Queries for `GlobalFeeRate` mempool walk strategy @@ -1649,58 +1648,29 @@ impl MemPoolDB { } // First, try to read from the retry list - let (candidate, update_estimate) = match candidate_cache.next() { - Some(tx) => { - let update_estimate = tx.fee_rate.is_none(); - (tx, update_estimate) - } - None => { - // When the retry list is empty, read from the mempool db depending on the configured miner strategy - match settings.strategy { - MemPoolWalkStrategy::GlobalFeeRate => { - let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) - < settings.consider_no_estimate_tx_prob; - // randomly select from either the null fee-rate transactions or those with fee-rate estimates. - let opt_tx = if start_with_no_estimate { - null_iterator.next().map_err(Error::SqliteError)? - } else { + let (candidate, update_estimate) = match settings.strategy { + MemPoolWalkStrategy::GlobalFeeRate => { + let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) + < settings.consider_no_estimate_tx_prob; + // randomly select from either the null fee-rate transactions or those with fee-rate estimates. + let opt_tx = if start_with_no_estimate { + null_iterator.next().map_err(Error::SqliteError)? + } else { + fee_iterator.next().map_err(Error::SqliteError)? + }; + match opt_tx { + Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), + None => { + // If the selected iterator is empty, check the other + match if start_with_no_estimate { fee_iterator.next().map_err(Error::SqliteError)? - }; - match opt_tx { - Some(row) => { - (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate) - } - None => { - // If the selected iterator is empty, check the other - match if start_with_no_estimate { - fee_iterator.next().map_err(Error::SqliteError)? - } else { - null_iterator.next().map_err(Error::SqliteError)? - } { - Some(row) => ( - MemPoolTxInfoPartial::from_row(row)?, - !start_with_no_estimate, - ), - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; - } - } - } - } - } - MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { - match query_stmt_nonce_rank - .query(NO_PARAMS) - .map_err(Error::SqliteError)? - .next() - .map_err(Error::SqliteError)? - { - Some(row) => { - let tx = MemPoolTxInfoPartial::from_row(row)?; - let update_estimate = tx.fee_rate.is_none(); - (tx, update_estimate) - } + } else { + null_iterator.next().map_err(Error::SqliteError)? 
+ } { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + !start_with_no_estimate, + ), None => { debug!("No more transactions to consider in mempool"); break MempoolIterationStopReason::NoMoreCandidates; @@ -1709,6 +1679,24 @@ impl MemPoolDB { } } } + MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { + match query_stmt_nonce_rank + .query(NO_PARAMS) + .map_err(Error::SqliteError)? + .next() + .map_err(Error::SqliteError)? + { + Some(row) => { + let tx = MemPoolTxInfoPartial::from_row(row)?; + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + } + None => { + debug!("No more transactions to consider in mempool"); + break MempoolIterationStopReason::NoMoreCandidates; + } + } + } }; // Check the nonces. @@ -1739,7 +1727,7 @@ impl MemPoolDB { } Ordering::Greater => { debug!( - "Mempool: nonces too high, cached for later"; + "Mempool: nonces too high"; "txid" => %candidate.txid, "tx_origin_addr" => %candidate.origin_address, "tx_origin_nonce" => candidate.origin_nonce, @@ -1747,8 +1735,6 @@ impl MemPoolDB { "expected_origin_nonce" => expected_origin_nonce, "expected_sponsor_nonce" => expected_sponsor_nonce, ); - // This transaction could become runnable in this pass, save it for later - candidate_cache.push(candidate); continue; } Ordering::Equal => { @@ -1856,13 +1842,6 @@ impl MemPoolDB { break MempoolIterationStopReason::IteratorExited; } } - - // Reset for finding the next transaction to process - debug!( - "Mempool: reset: retry list has {} entries", - candidate_cache.len() - ); - candidate_cache.reset(); }; // drop these rusqlite statements and queries, since their existence as immutable borrows on the From 1587527b4b15921cada474a6bea92c5176db7e1b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 7 Mar 2025 10:05:42 -0600 Subject: [PATCH 086/238] use "and_modify/or_insert" instead of match --- stackslib/src/net/http/request.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index f875d61f69e..aa2c3194192 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -438,15 +438,13 @@ impl StacksMessageCodec for HttpRequestPreamble { } else if key == "set-cookie" { set_cookie.push(value); } else { - match headers.entry(key) { - Entry::Vacant(vacant_entry) => { - vacant_entry.insert(value); - } - Entry::Occupied(mut occupied_entry) => { - occupied_entry.get_mut().push_str(", "); - occupied_entry.get_mut().push_str(&value); - } - } + headers + .entry(key) + .and_modify(|entry| { + entry.push_str(", "); + entry.push_str(&value); + }) + .or_insert(value); } } From 40961abab5345bc7b3e2c55d16cf7ec5c2c42e5a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 7 Mar 2025 13:46:16 -0500 Subject: [PATCH 087/238] fix: put `fmt-stacks` back, since we can't force `nightly` --- .cargo/config.toml | 2 +- .github/workflows/ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 3c6d5f7019e..41566339d9c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,6 @@ [alias] stacks-node = "run --package stacks-node --" +fmt-stacks = "fmt -- --config group_imports=StdExternalCrate,imports_granularity=Module" clippy-stacks = "clippy -p libstackerdb -p stacks-signer -p pox-locking -p clarity -p libsigner -p stacks-common --no-deps --tests --all-features -- -D warnings" # Uncomment to improve performance slightly, at the cost of portability @@ -11,4 +12,3 @@ 
clippy-stacks = "clippy -p libstackerdb -p stacks-signer -p pox-locking -p clari #[target.x86_64-unknown-linux-gnu] #linker = "/usr/bin/clang" #rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] - diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1aedf269e7c..e32148c06fc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -48,7 +48,7 @@ jobs: id: rustfmt uses: stacks-network/actions/rustfmt@main with: - alias: "+nightly fmt" + alias: "fmt-stacks" ###################################################################################### ## Check if the branch that this workflow is being run against is a release branch From 6893a2a3491ffb1dd8f41a61a0803e6d6d4f0c72 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 7 Mar 2025 14:04:48 -0500 Subject: [PATCH 088/238] fix: resolve DB lock issue --- stackslib/src/core/mempool.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 2d4371e1d85..0f6e5e014be 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1639,6 +1639,9 @@ impl MemPoolDB { LIMIT 1 "; let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; + let mut nonce_rank_iterator = query_stmt_nonce_rank + .query(NO_PARAMS) + .map_err(Error::SqliteError)?; let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { @@ -1680,12 +1683,7 @@ impl MemPoolDB { } } MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { - match query_stmt_nonce_rank - .query(NO_PARAMS) - .map_err(Error::SqliteError)? - .next() - .map_err(Error::SqliteError)? - { + match nonce_rank_iterator.next().map_err(Error::SqliteError)? { Some(row) => { let tx = MemPoolTxInfoPartial::from_row(row)?; let update_estimate = tx.fee_rate.is_none(); @@ -1851,6 +1849,7 @@ impl MemPoolDB { drop(query_stmt_null); drop(fee_iterator); drop(query_stmt_fee); + drop(nonce_rank_iterator); drop(query_stmt_nonce_rank); // Write through the nonce cache to the database From 167b243e02b9100066cd0684a4951b5f5bd0f58b Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Sun, 9 Mar 2025 17:53:53 -0700 Subject: [PATCH 089/238] feat: move pending payload processing to EventDispatcher --- testnet/stacks-node/src/event_dispatcher.rs | 403 +++++++++++-------- testnet/stacks-node/src/node.rs | 5 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 7 +- testnet/stacks-node/src/run_loop/neon.rs | 5 +- 4 files changed, 250 insertions(+), 170 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 45f563238ef..f7bb80fce52 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -82,7 +82,7 @@ lazy_static! { } #[derive(Debug, Clone)] -struct EventObserver { +pub struct EventObserver { /// Path to the database where pending payloads are stored. If `None`, then /// the database is not used and events are not recoverable across restarts. 
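    /// The file is created under the node's working directory and is shared by
    /// every observer the dispatcher registers.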
db_path: Option, @@ -335,20 +335,6 @@ impl RewardSetEventPayload { static TEST_EVENT_OBSERVER_SKIP_RETRY: LazyLock> = LazyLock::new(TestFlag::default); impl EventObserver { - fn init_db(db_path: &str) -> Result { - let conn = Connection::open(db_path)?; - conn.execute( - "CREATE TABLE IF NOT EXISTS pending_payloads ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT NOT NULL, - payload TEXT NOT NULL, - timeout INTEGER NOT NULL - )", - [], - )?; - Ok(conn) - } - fn insert_payload( conn: &Connection, url: &str, @@ -400,76 +386,16 @@ impl EventObserver { } } - fn get_pending_payloads( - conn: &Connection, - ) -> Result, db_error> { - let mut stmt = - conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY id")?; - let payload_iter = stmt.query_and_then( - [], - |row| -> Result<(i64, String, serde_json::Value, u64), db_error> { - let id: i64 = row.get(0)?; - let url: String = row.get(1)?; - let payload_text: String = row.get(2)?; - let payload: serde_json::Value = - serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; - let timeout_ms: u64 = row.get(3)?; - Ok((id, url, payload, timeout_ms)) - }, - )?; - payload_iter.collect() - } - fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> { conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?; Ok(()) } - fn process_pending_payloads(&self, conn: &Connection) { - let pending_payloads = match Self::get_pending_payloads(conn) { - Ok(payloads) => payloads, - Err(e) => { - error!( - "Event observer: failed to retrieve pending payloads from database"; - "error" => ?e - ); - return; - } - }; - - for (id, url, payload, timeout_ms) in pending_payloads { - let full_url = Url::parse(url.as_str()) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {url} as a URL")); - let endport_url = Url::parse(format!("http://{}", &self.endpoint).as_str()) - .unwrap_or_else(|_| { - panic!( - "Event dispatcher: unable to parse {} as a URL", - &self.endpoint - ) - }); - // If the URL is not the same as the endpoint, skip it - if full_url.origin() != endport_url.origin() { - continue; - } - let timeout = Duration::from_millis(timeout_ms); - Self::send_payload_directly(&payload, &url, timeout); - - #[cfg(test)] - if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { - warn!("Fault injection: delete_payload"); - return; - } - - if let Err(e) = Self::delete_payload(conn, id) { - error!( - "Event observer: failed to delete pending payload from database"; - "error" => ?e - ); - } - } - } - - fn send_payload_directly(payload: &serde_json::Value, full_url: &str, timeout: Duration) { + fn send_payload_directly( + payload: &serde_json::Value, + full_url: &str, + timeout: Duration, + ) -> bool { debug!( "Event dispatcher: Sending payload"; "url" => %full_url, "payload" => ?payload ); @@ -522,7 +448,7 @@ impl EventObserver { #[cfg(test)] if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { warn!("Fault injection: skipping retry of payload"); - return; + return false; } sleep(backoff); @@ -533,23 +459,10 @@ impl EventObserver { ); attempts = attempts.saturating_add(1); } + true } - fn new(working_dir: Option, endpoint: String, timeout: Duration) -> Self { - let db_path = if let Some(mut db_path) = working_dir { - db_path.push("event_observers.sqlite"); - - Self::init_db( - db_path - .to_str() - .expect("Failed to convert chainstate path to string"), - ) - .expect("Failed to initialize database for event observer"); - Some(db_path) - } else { - None - }; - + fn new(db_path: Option, endpoint: String, timeout: Duration) 
-> Self {
         EventObserver {
             db_path,
             endpoint,
@@ -559,7 +472,7 @@ impl EventObserver {

     /// Send the payload to the given URL.
     /// Before sending this payload, any pending payloads in the database will be sent first.
-    pub fn send_payload(&self, payload: &serde_json::Value, path: &str) {
+    pub fn send_payload(&self, payload: &serde_json::Value, path: &str, id: Option<i64>) {
         // Construct the full URL
         let url_str = if path.starts_with('/') {
             format!("{}{path}", &self.endpoint)
@@ -572,11 +485,26 @@
             let conn =
                 Connection::open(db_path).expect("Failed to open database for event observer");

-            // Insert the new payload into the database
-            Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout);
+            let id = match id {
+                Some(id) => id,
+                None => {
+                    Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout);
+                    conn.last_insert_rowid()
+                }
+            };
+
+            let success = Self::send_payload_directly(payload, &full_url, self.timeout);
+            // This is only `false` when the TestFlag is set to skip retries
+            if !success {
+                return;
+            }

-            // Process all pending payloads
-            self.process_pending_payloads(&conn);
+            if let Err(e) = Self::delete_payload(&conn, id) {
+                error!(
+                    "Event observer: failed to delete pending payload from database";
+                    "error" => ?e
+                );
+            }
         } else {
             // No database, just send the payload
             Self::send_payload_directly(payload, &full_url, self.timeout);
@@ -728,11 +656,11 @@ impl EventObserver {
     }

     fn send_new_attachments(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_ATTACHMENT_PROCESSED);
+        self.send_payload(payload, PATH_ATTACHMENT_PROCESSED, None);
     }

     fn send_new_mempool_txs(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_MEMPOOL_TX_SUBMIT);
+        self.send_payload(payload, PATH_MEMPOOL_TX_SUBMIT, None);
     }

     /// Serializes new microblocks data into a JSON payload and sends it off to the correct path
@@ -764,31 +692,31 @@ impl EventObserver {
             "burn_block_timestamp": burn_block_timestamp,
         });

-        self.send_payload(&payload, PATH_MICROBLOCK_SUBMIT);
+        self.send_payload(&payload, PATH_MICROBLOCK_SUBMIT, None);
     }

     fn send_dropped_mempool_txs(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_MEMPOOL_TX_DROP);
+        self.send_payload(payload, PATH_MEMPOOL_TX_DROP, None);
     }

     fn send_mined_block(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_MINED_BLOCK);
+        self.send_payload(payload, PATH_MINED_BLOCK, None);
     }

     fn send_mined_microblock(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_MINED_MICROBLOCK);
+        self.send_payload(payload, PATH_MINED_MICROBLOCK, None);
     }

     fn send_mined_nakamoto_block(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK);
+        self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK, None);
     }

     fn send_stackerdb_chunks(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_STACKERDB_CHUNKS);
+        self.send_payload(payload, PATH_STACKERDB_CHUNKS, None);
     }

     fn send_new_burn_block(&self, payload: &serde_json::Value) {
-        self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT);
+        self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT, None);
     }

     #[allow(clippy::too_many_arguments)]
@@ -936,6 +864,8 @@ pub struct EventDispatcher {
     block_proposal_observers_lookup: HashSet<u16>,
     /// Channel for sending StackerDB events to the miner coordinator
     pub stackerdb_channel: Arc<Mutex<StackerDBChannel>>,
+    /// Database path for pending payloads
+    db_path: Option<PathBuf>,
 }

 /// This struct is used specifically for receiving proposal responses.
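Taken together with `process_pending_payloads` below, the reworked `send_payload` gives at-least-once delivery: a payload is persisted before the first send attempt and deleted only after a confirmed send, so a crash in between leaves a row behind to be replayed on restart. The flow reduces to roughly this sketch (helper names are from this patch; the no-database branch and error handling are elided):

    // `id` is Some(..) only when replaying an already-persisted payload.
    let id = match id {
        Some(id) => id,
        None => {
            Self::insert_payload_with_retry(&conn, &full_url, payload, self.timeout);
            conn.last_insert_rowid() // the row just written above
        }
    };
    // Blocks, retrying with backoff, until the observer acknowledges.
    if Self::send_payload_directly(payload, &full_url, self.timeout) {
        // Only a confirmed delivery removes the durable copy.
        let _ = Self::delete_payload(&conn, id);
    }

One corollary: an observer can receive the same payload twice if the process dies after the send but before the delete, so downstream consumers should treat deliveries as idempotent.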
@@ -957,7 +887,7 @@ impl ProposalCallbackReceiver for ProposalCallbackHandler {
             }
         };
         for observer in self.observers.iter() {
-            observer.send_payload(&response, PATH_PROPOSAL_RESPONSE);
+            observer.send_payload(&response, PATH_PROPOSAL_RESPONSE, None);
         }
     }
 }
@@ -1128,12 +1058,18 @@ impl BlockEventDispatcher for EventDispatcher {

 impl Default for EventDispatcher {
     fn default() -> Self {
-        EventDispatcher::new()
+        EventDispatcher::new(None)
     }
 }

 impl EventDispatcher {
-    pub fn new() -> EventDispatcher {
+    pub fn new(working_dir: Option<PathBuf>) -> EventDispatcher {
+        let db_path = if let Some(mut db_path) = working_dir {
+            db_path.push("event_observers.sqlite");
+            Some(db_path)
+        } else {
+            None
+        };
         EventDispatcher {
             stackerdb_channel: Arc::new(Mutex::new(StackerDBChannel::new())),
             registered_observers: vec![],
@@ -1148,6 +1084,7 @@ impl EventDispatcher {
             mined_microblocks_observers_lookup: HashSet::new(),
             stackerdb_observers_lookup: HashSet::new(),
             block_proposal_observers_lookup: HashSet::new(),
+            db_path,
         }
     }

@@ -1361,7 +1298,11 @@ impl EventDispatcher {
             );

             // Send payload
-            self.registered_observers[observer_id].send_payload(&payload, PATH_BLOCK_PROCESSED);
+            self.registered_observers[observer_id].send_payload(
+                &payload,
+                PATH_BLOCK_PROCESSED,
+                None,
+            );
         }
     }
 }
@@ -1673,10 +1614,10 @@ impl EventDispatcher {
         }
     }

-    pub fn register_observer(&mut self, conf: &EventObserverConfig, working_dir: PathBuf) {
+    pub fn register_observer(&mut self, conf: &EventObserverConfig) -> EventObserver {
         info!("Registering event observer at: {}", conf.endpoint);
         let event_observer = EventObserver::new(
-            Some(working_dir),
+            self.db_path.clone(),
             conf.endpoint.clone(),
             Duration::from_millis(conf.timeout_ms),
         );
@@ -1743,7 +1684,119 @@ impl EventDispatcher {
             }
         }

-        self.registered_observers.push(event_observer);
+        self.registered_observers.push(event_observer.clone());
+
+        event_observer
+    }
+
+    fn init_db(db_path: &PathBuf) -> Result<Connection, db_error> {
+        let conn = Connection::open(db_path.to_str().unwrap())?;
+        conn.execute(
+            "CREATE TABLE IF NOT EXISTS pending_payloads (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                url TEXT NOT NULL,
+                payload TEXT NOT NULL,
+                timeout INTEGER NOT NULL
+            )",
+            [],
+        )?;
+        Ok(conn)
+    }
+
+    fn get_pending_payloads(
+        conn: &Connection,
+    ) -> Result<Vec<(i64, String, serde_json::Value, u64)>, db_error> {
+        let mut stmt =
+            conn.prepare("SELECT id, url, payload, timeout FROM pending_payloads ORDER BY id")?;
+        let payload_iter = stmt.query_and_then(
+            [],
+            |row| -> Result<(i64, String, serde_json::Value, u64), db_error> {
+                let id: i64 = row.get(0)?;
+                let url: String = row.get(1)?;
+                let payload_text: String = row.get(2)?;
+                let payload: serde_json::Value =
+                    serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?;
+                let timeout_ms: u64 = row.get(3)?;
+                Ok((id, url, payload, timeout_ms))
+            },
+        )?;
+        payload_iter.collect()
+    }
+
+    fn delete_payload(conn: &Connection, id: i64) -> Result<(), db_error> {
+        conn.execute("DELETE FROM pending_payloads WHERE id = ?1", params![id])?;
+        Ok(())
+    }
+
+    /// Process any pending payloads in the database.
+    /// This is called when the event dispatcher is first instantiated.
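+    /// Payloads are replayed in insertion order (`ORDER BY id`); a payload whose
+    /// observer is no longer registered is deleted rather than retried, so stale
+    /// rows cannot wedge the queue.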
+ pub fn process_pending_payloads(&self) { + let Some(db_path) = &self.db_path else { + return; + }; + let conn = EventDispatcher::init_db(db_path).expect("Failed to initialize database"); + let pending_payloads = match Self::get_pending_payloads(&conn) { + Ok(payloads) => payloads, + Err(e) => { + error!( + "Event observer: failed to retrieve pending payloads from database"; + "error" => ?e + ); + return; + } + }; + + info!( + "Event dispatcher: processing {} pending payloads", + pending_payloads.len() + ); + + for (id, url, payload, _timeout_ms) in pending_payloads { + info!("Event dispatcher: processing pending payload: {url}"); + let full_url = Url::parse(url.as_str()) + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {url} as a URL")); + // find the right observer + let observer = self.registered_observers.iter().find(|observer| { + let endpoint_url = Url::parse(format!("http://{}", &observer.endpoint).as_str()) + .unwrap_or_else(|_| { + panic!( + "Event dispatcher: unable to parse {} as a URL", + observer.endpoint + ) + }); + full_url.origin() == endpoint_url.origin() + }); + + let Some(observer) = observer else { + // This observer is no longer registered, skip and delete + info!( + "Event dispatcher: observer {} no longer registered, skipping", + url + ); + if let Err(e) = Self::delete_payload(&conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + continue; + }; + + observer.send_payload(&payload, full_url.path(), Some(id)); + + #[cfg(test)] + if TEST_EVENT_OBSERVER_SKIP_RETRY.get() { + warn!("Fault injection: delete_payload"); + return; + } + + if let Err(e) = Self::delete_payload(&conn, id) { + error!( + "Event observer: failed to delete pending payload from database"; + "error" => ?e + ); + } + } } } @@ -1965,10 +2018,9 @@ mod test { fn test_init_db() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_init_db.sqlite"); - let db_path_str = db_path.to_str().unwrap(); // Call init_db - let conn_result = EventObserver::init_db(db_path_str); + let conn_result = EventDispatcher::init_db(&db_path); assert!(conn_result.is_ok(), "Failed to initialize the database"); // Check that the database file exists @@ -1989,9 +2041,8 @@ mod test { fn test_insert_and_get_pending_payloads() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_payloads.sqlite"); - let db_path_str = db_path.to_str().unwrap(); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let url = "http://example.com/api"; let payload = json!({"key": "value"}); @@ -2003,7 +2054,7 @@ mod test { // Get pending payloads let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); let (_id, retrieved_url, retrieved_payload, timeout_ms) = &pending_payloads[0]; @@ -2020,9 +2071,8 @@ mod test { fn test_delete_payload() { let dir = tempdir().unwrap(); let db_path = dir.path().join("test_delete_payload.sqlite"); - let db_path_str = db_path.to_str().unwrap(); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let url = "http://example.com/api"; let payload = 
json!({"key": "value"}); @@ -2034,7 +2084,7 @@ mod test { // Get pending payloads let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 1, "Expected one pending payload"); let (id, _, _, _) = pending_payloads[0]; @@ -2045,7 +2095,7 @@ mod test { // Verify that the pending payloads list is empty let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); } @@ -2056,14 +2106,20 @@ mod test { let dir = tempdir().unwrap(); let db_path = dir.path().join("event_observers.sqlite"); - let db_path_str = db_path.to_str().unwrap(); let mut server = mockito::Server::new(); let endpoint = server.host_with_port(); + info!("endpoint: {}", endpoint); let timeout = Duration::from_secs(5); - let observer = - EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let mut dispatcher = EventDispatcher::new(Some(dir.path().to_path_buf())); + + dispatcher.register_observer(&EventObserverConfig { + endpoint: endpoint.clone(), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: timeout.as_millis() as u64, + }); + + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let payload = json!({"key": "value"}); let timeout = Duration::from_secs(5); @@ -2083,12 +2139,14 @@ mod test { EventObserver::insert_payload(&conn, url, &payload, timeout) .expect("Failed to insert payload"); + // dispatcher.process_pe + // Process pending payloads - observer.process_pending_payloads(&conn); + dispatcher.process_pending_payloads(); // Verify that the pending payloads list is empty let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); // Verify that the mock was called @@ -2099,15 +2157,19 @@ mod test { fn pending_payloads_are_skipped_if_url_does_not_match() { let dir = tempdir().unwrap(); let db_path = dir.path().join("event_observers.sqlite"); - let db_path_str = db_path.to_str().unwrap(); let mut server = mockito::Server::new(); let endpoint = server.host_with_port(); let timeout = Duration::from_secs(5); - let observer = - EventObserver::new(Some(dir.path().to_path_buf()), endpoint.clone(), timeout); + let mut dispatcher = EventDispatcher::new(Some(dir.path().to_path_buf())); + + dispatcher.register_observer(&EventObserverConfig { + endpoint: endpoint.clone(), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: timeout.as_millis() as u64, + }); - let conn = EventObserver::init_db(db_path_str).expect("Failed to initialize the database"); + let conn = EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); let payload = json!({"key": "value"}); let timeout = Duration::from_secs(5); @@ -2129,38 +2191,39 @@ mod test { EventObserver::insert_payload(&conn, url, &payload, timeout) .expect("Failed to insert payload"); - observer.process_pending_payloads(&conn); + dispatcher.process_pending_payloads(); let pending_payloads = - 
EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); // Verify that the pending payload is still in the database assert_eq!( pending_payloads.len(), - 1, - "Expected payload to remain in database since URL didn't match" + 0, + "Expected payload to be removed from database since URL didn't match" ); mock.assert(); } #[test] - fn test_new_event_observer_with_db() { + fn test_new_event_dispatcher_with_db() { let dir = tempdir().unwrap(); let working_dir = dir.path().to_path_buf(); - let endpoint = "http://example.com".to_string(); - let timeout = Duration::from_secs(5); + let dispatcher = EventDispatcher::new(Some(working_dir.clone())); - let observer = EventObserver::new(Some(working_dir.clone()), endpoint.clone(), timeout); + let expected_db_path = working_dir.join("event_observers.sqlite"); + assert_eq!(dispatcher.db_path, Some(expected_db_path.clone())); - // Verify fields - assert_eq!(observer.endpoint, endpoint); - assert_eq!(observer.timeout, timeout); + assert!( + !expected_db_path.exists(), + "Database file was created too soon" + ); + + EventDispatcher::init_db(&expected_db_path).expect("Failed to initialize the database"); // Verify that the database was initialized - let mut db_path = working_dir; - db_path.push("event_observers.sqlite"); - assert!(db_path.exists(), "Database file was not created"); + assert!(expected_db_path.exists(), "Database file was not created"); } #[test] @@ -2185,6 +2248,10 @@ mod test { let working_dir = dir.path().to_path_buf(); let payload = json!({"key": "value"}); + let dispatcher = EventDispatcher::new(Some(working_dir.clone())); + let db_path = dispatcher.clone().db_path.clone().unwrap(); + EventDispatcher::init_db(&db_path).expect("Failed to initialize the database"); + // Create a mock server let mut server = mockito::Server::new(); let _m = server @@ -2197,12 +2264,12 @@ mod test { let endpoint = server.url().strip_prefix("http://").unwrap().to_string(); let timeout = Duration::from_secs(5); - let observer = EventObserver::new(Some(working_dir), endpoint, timeout); + let observer = EventObserver::new(Some(db_path.clone()), endpoint, timeout); TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); // Call send_payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Verify that the payload was sent and database is empty _m.assert(); @@ -2212,7 +2279,7 @@ mod test { let db_path_str = db_path.to_str().unwrap(); let conn = Connection::open(db_path_str).expect("Failed to open database"); let pending_payloads = - EventObserver::get_pending_payloads(&conn).expect("Failed to get pending payloads"); + EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); assert_eq!(pending_payloads.len(), 0, "Expected no pending payloads"); } @@ -2237,7 +2304,7 @@ mod test { let observer = EventObserver::new(None, endpoint, timeout); // Call send_payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Verify that the payload was sent _m.assert(); @@ -2270,7 +2337,7 @@ mod test { let payload = json!({"key": "value"}); - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Wait for the server to process the request rx.recv_timeout(Duration::from_secs(5)) @@ -2319,7 +2386,7 @@ mod test { let payload = json!({"key": "value"}); - observer.send_payload(&payload, "/test"); + 
observer.send_payload(&payload, "/test", None); // Wait for the server to process the request rx.recv_timeout(Duration::from_secs(5)) @@ -2370,7 +2437,7 @@ mod test { let start_time = Instant::now(); // Call the function being tested - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Record the time after the function returns let elapsed_time = start_time.elapsed(); @@ -2414,13 +2481,13 @@ mod test { attempt += 1; match attempt { 1 => { - debug!("Mock server received request attempt 1"); + info!("Mock server received request attempt 1"); // Do not reply, forcing the sender to timeout and retry, // but don't drop the request or it will receive a 500 error, _request_holder = Some(request); } 2 => { - debug!("Mock server received request attempt 2"); + info!("Mock server received request attempt 2"); // Verify the payload let mut payload = String::new(); @@ -2433,7 +2500,7 @@ mod test { request.respond(response).unwrap(); } 3 => { - debug!("Mock server received request attempt 3"); + info!("Mock server received request attempt 3"); // Verify the payload let mut payload = String::new(); @@ -2455,7 +2522,15 @@ mod test { } }); - let observer = EventObserver::new(Some(working_dir), format!("127.0.0.1:{port}"), timeout); + let mut dispatcher = EventDispatcher::new(Some(working_dir.clone())); + + let observer = dispatcher.register_observer(&EventObserverConfig { + endpoint: format!("127.0.0.1:{port}"), + timeout_ms: timeout.as_millis() as u64, + events_keys: vec![EventKeyType::AnyEvent], + }); + + EventDispatcher::init_db(&dispatcher.clone().db_path.unwrap()).unwrap(); let payload = json!({"key": "value"}); let payload2 = json!({"key": "value2"}); @@ -2467,15 +2542,17 @@ mod test { info!("Sending payload 1"); // Send the payload - observer.send_payload(&payload, "/test"); + observer.send_payload(&payload, "/test", None); // Re-enable retrying TEST_EVENT_OBSERVER_SKIP_RETRY.set(false); + dispatcher.process_pending_payloads(); + info!("Sending payload 2"); // Send another payload - observer.send_payload(&payload2, "/test"); + observer.send_payload(&payload2, "/test", None); // Wait for the server to process the requests rx.recv_timeout(Duration::from_secs(5)) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 146441d2ae4..093f19c8ada 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -337,11 +337,12 @@ impl Node { ) .expect("FATAL: failed to initiate mempool"); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir())); for observer in &config.events_observers { - event_dispatcher.register_observer(observer, config.get_working_dir()); + event_dispatcher.register_observer(observer); } + event_dispatcher.process_pending_payloads(); let burnchain_config = config.get_burnchain(); diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 335fb325d8a..beffb7c8956 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -91,10 +91,11 @@ impl RunLoop { config.burnchain.burn_fee_cap, ))); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir())); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer, config.get_working_dir()); + event_dispatcher.register_observer(observer); } + 
event_dispatcher.process_pending_payloads(); Self { config, @@ -401,7 +402,7 @@ impl RunLoop { /// This function will block by looping infinitely. /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and - /// the nodes, taking turns on tenures. + /// the nodes, taking turns on tenures. pub fn start( &mut self, burnchain_opt: Option, diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 299335f35f8..f7effad9ba4 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -274,10 +274,11 @@ impl RunLoop { config.burnchain.burn_fee_cap, ))); - let mut event_dispatcher = EventDispatcher::new(); + let mut event_dispatcher = EventDispatcher::new(Some(config.get_working_dir())); for observer in config.events_observers.iter() { - event_dispatcher.register_observer(observer, config.get_working_dir()); + event_dispatcher.register_observer(observer); } + event_dispatcher.process_pending_payloads(); Self { config, From ea4d566c1a3ccf69ab55fdaf29f1781da00f23db Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 10 Mar 2025 13:21:42 +0100 Subject: [PATCH 090/238] refactor: add rusqlite cargo feature --- clarity/Cargo.toml | 7 +++---- clarity/src/vm/analysis/mod.rs | 4 ++-- clarity/src/vm/database/clarity_store.rs | 8 ++++---- clarity/src/vm/database/mod.rs | 6 +++--- clarity/src/vm/docs/contracts.rs | 12 ++++++------ clarity/src/vm/errors.rs | 4 ++-- clarity/src/vm/mod.rs | 2 +- stacks-common/Cargo.toml | 5 ++--- stacks-common/src/bitvec.rs | 6 +++--- stacks-common/src/types/mod.rs | 2 +- stacks-common/src/util/macros.rs | 2 +- 11 files changed, 28 insertions(+), 30 deletions(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index fb0a191d6f7..b217972938f 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -31,6 +31,7 @@ stacks_common = { package = "stacks-common", path = "../stacks-common" } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } +rusqlite = { workspace = true, optional = true } [dependencies.serde_json] version = "1.0" @@ -40,9 +41,6 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.2.23" features = ["std"] -[target.'cfg(not(target_family = "wasm"))'.dependencies] -rusqlite = { workspace = true } - [dev-dependencies] assert-json-diff = "1.0.0" mutants = "0.0.3" @@ -51,9 +49,10 @@ mutants = "0.0.3" # criterion = "0.3" [features] -default = [] +default = ["rusqlite"] developer-mode = ["stacks_common/developer-mode"] slog_json = ["stacks_common/slog_json"] +rusqlite = ["stacks_common/rusqlite", "dep:rusqlite"] testing = [] devtools = [] rollback_value_check = [] diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 10717100e2e..11e42ad9ae1 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -36,7 +36,7 @@ use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; pub use self::types::{AnalysisPass, ContractAnalysis}; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::costs::LimitedCostTracker; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use crate::vm::database::MemoryBackingStore; use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; use crate::vm::representations::SymbolicExpression; @@ -44,7 +44,7 @@ use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; use crate::vm::ClarityVersion; /// Used 
by CLI tools like the docs generator. Not used in production -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub fn mem_type_check( snippet: &str, version: ClarityVersion, diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index 3d1e4c975ec..694403513bd 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use rusqlite::Connection; use stacks_common::types::chainstate::{StacksBlockId, TrieHash}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use crate::vm::database::{ ClarityDatabase, ClarityDeserializable, ClaritySerializable, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; @@ -85,7 +85,7 @@ pub trait ClarityBackingStore { fn get_open_chain_tip_height(&mut self) -> u32; fn get_open_chain_tip(&mut self) -> StacksBlockId; - #[cfg(not(target_family = "wasm"))] + #[cfg(feature = "rusqlite")] fn get_side_store(&mut self) -> &Connection; fn get_cc_special_cases_handler(&self) -> Option { @@ -222,7 +222,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't retrieve data") } - #[cfg(not(target_family = "wasm"))] + #[cfg(feature = "rusqlite")] fn get_side_store(&mut self) -> &Connection { panic!("NullBackingStore has no side store") } diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index 2ebeb00dfd7..cee4cbe00cc 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub use sqlite::MemoryBackingStore; pub use self::clarity_db::{ @@ -22,7 +22,7 @@ pub use self::clarity_db::{ }; pub use self::clarity_store::{ClarityBackingStore, SpecialCaseHandler}; pub use self::key_value_wrapper::{RollbackWrapper, RollbackWrapperPersistedLog}; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub use self::sqlite::SqliteConnection; pub use self::structures::{ ClarityDeserializable, ClaritySerializable, DataMapMetadata, DataVariableMetadata, @@ -32,6 +32,6 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub mod sqlite; mod structures; diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs index 230bb2a2fdf..1acdda78d85 100644 --- a/clarity/src/vm/docs/contracts.rs +++ b/clarity/src/vm/docs/contracts.rs @@ -4,13 +4,13 @@ use hashbrown::{HashMap, HashSet}; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::StacksEpochId; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use crate::vm::analysis::mem_type_check; use crate::vm::analysis::ContractAnalysis; use crate::vm::ast::{build_ast_with_rules, ASTRules}; use crate::vm::contexts::GlobalContext; use crate::vm::costs::LimitedCostTracker; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use crate::vm::database::MemoryBackingStore; use crate::vm::docs::{get_input_type_string, get_output_type_string, get_signature}; use crate::vm::types::{FunctionType, QualifiedContractIdentifier, Value}; @@ -63,7 +63,7 @@ fn make_func_ref(func_name: &str, func_type: &FunctionType, description: &str) - } } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] #[allow(clippy::expect_used)] fn get_constant_value(var_name: &str, contract_content: &str) -> Value { let to_eval = format!("{}\n{}", contract_content, var_name); @@ -72,7 +72,7 @@ fn get_constant_value(var_name: &str, contract_content: &str) -> Value { .expect("BUG: failed to return constant value") } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] fn doc_execute(program: &str) -> Result, vm::Error> { let contract_id = QualifiedContractIdentifier::transient(); let mut contract_context = ContractContext::new(contract_id.clone(), ClarityVersion::Clarity2); @@ -99,7 +99,7 @@ fn doc_execute(program: &str) -> Result, vm::Error> { }) } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] #[allow(clippy::expect_used)] pub fn make_docs( content: &str, @@ -185,7 +185,7 @@ pub fn make_docs( /// Produce a set of documents for multiple contracts, supplied as a list of `(contract_name, contract_content)` pairs, /// and a map from `contract_name` to corresponding `ContractSupportDocs` -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub fn produce_docs_refs, B: AsRef>( contracts: &[(A, B)], support_docs: &HashMap<&str, ContractSupportDocs>, diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index 69b623ea388..a3100dcd83a 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -16,7 +16,7 @@ use std::{error, fmt}; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; @@ -57,7 +57,7 @@ pub enum InterpreterError { UninitializedPersistedVariable, FailedToConstructAssetTable, 
FailedToConstructEventBatch, - #[cfg(not(target_family = "wasm"))] + #[cfg(feature = "rusqlite")] SqliteError(IncomparableError), BadFileName, FailedToCreateDataDirectory, diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 9ab3f3a5f45..8d08dd1950f 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -42,7 +42,7 @@ pub mod coverage; pub mod events; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub mod tooling; #[cfg(any(test, feature = "testing"))] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 441af02d580..4b965d753d2 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -31,6 +31,7 @@ slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" hashbrown = { workspace = true } +rusqlite = { workspace = true, optional = true } [target.'cfg(unix)'.dependencies] nix = "0.23" @@ -60,9 +61,6 @@ features = ["serde"] version = "0.2.23" features = ["std"] -[target.'cfg(not(target_family = "wasm"))'.dependencies] -rusqlite = { workspace = true } - [dev-dependencies] rand_core = { workspace = true } @@ -70,6 +68,7 @@ rand_core = { workspace = true } default = ["developer-mode"] developer-mode = [] slog_json = ["slog-json"] +rusqlite = ["dep:rusqlite"] testing = [] serde = [] bech32_std = [] diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index b5f6c3b26d9..b4d61397fa4 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; @@ -106,7 +106,7 @@ impl<'de, const MAX_SIZE: u16> Deserialize<'de> for BitVec { } } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] impl FromSql for BitVec { fn column_result(value: ValueRef<'_>) -> FromSqlResult { let bytes = hex_bytes(value.as_str()?).map_err(|e| FromSqlError::Other(Box::new(e)))?; @@ -115,7 +115,7 @@ impl FromSql for BitVec { } } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] impl ToSql for BitVec { fn to_sql(&self) -> rusqlite::Result> { let hex = bytes_to_hex(self.serialize_to_vec().as_slice()); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 94ca15b7381..80f9ecf3fd4 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -19,7 +19,7 @@ use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::sync::LazyLock; -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] pub mod sqlite; use crate::address::c32::{c32_address, c32_address_decode}; diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 563babf905e..9e45a05994e 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -707,7 +707,7 @@ macro_rules! fmax { }} } -#[cfg(not(target_family = "wasm"))] +#[cfg(feature = "rusqlite")] macro_rules! 
impl_byte_array_rusqlite_only { ($thing:ident) => { impl rusqlite::types::FromSql for $thing { From 13854497311dc58a3e1dfa56cd33d85a8acd94b1 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 10 Mar 2025 15:17:18 +0100 Subject: [PATCH 091/238] refactor: use libsecp256k1 or wasm --- Cargo.lock | 100 +++++- stacks-common/Cargo.toml | 10 +- .../src/util/secp256k1/libsepc256k1.rs | 323 ++++++++++++++++++ stacks-common/src/util/secp256k1/mod.rs | 33 ++ .../src/util/{ => secp256k1}/secp256k1.rs | 21 +- 5 files changed, 466 insertions(+), 21 deletions(-) create mode 100644 stacks-common/src/util/secp256k1/libsepc256k1.rs create mode 100644 stacks-common/src/util/secp256k1/mod.rs rename stacks-common/src/util/{ => secp256k1}/secp256k1.rs (98%) diff --git a/Cargo.lock b/Cargo.lock index a51010ecdf3..d1ac2f362f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -173,6 +173,12 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "ascii" version = "1.1.0" @@ -685,7 +691,7 @@ dependencies = [ "aes-gcm", "base64 0.13.1", "hkdf", - "hmac", + "hmac 0.10.1", "percent-encoding", "rand 0.8.5", "sha2 0.9.9", @@ -739,6 +745,12 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -749,6 +761,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.7", + "subtle", +] + [[package]] name = "crypto-mac" version = "0.10.0" @@ -1382,7 +1404,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51ab2f639c231793c5f6114bdb9bbe50a7dbbfcd7c7c6bd8475dec2d991e964f" dependencies = [ "digest 0.9.0", - "hmac", + "hmac 0.10.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", ] [[package]] @@ -1391,10 +1423,21 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ - "crypto-mac", + "crypto-mac 0.10.0", "digest 0.9.0", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.7", + "hmac 0.8.1", +] + [[package]] name = "http" version = "0.2.11" @@ -1502,7 +1545,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -1737,6 +1780,54 @@ dependencies = [ 
"redox_syscall 0.4.1", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsigner" version = "0.0.1" @@ -3048,6 +3139,7 @@ dependencies = [ "hashbrown 0.15.2", "lazy_static", "libc", + "libsecp256k1", "nix", "rand 0.8.5", "rand_core 0.6.4", diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 491a6863507..2274de98097 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -46,10 +46,6 @@ winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } version = "1.0" features = ["arbitrary_precision", "unbounded_depth"] -[dependencies.secp256k1] -version = "0.24.3" -features = ["serde", "recovery"] - [dependencies.ed25519-dalek] workspace = true @@ -61,6 +57,12 @@ features = ["serde"] version = "0.2.23" features = ["std"] +[target.'cfg(not(target_family = "wasm"))'.dependencies] +secp256k1 = { version = "0.24.3", features = ["serde", "recovery"] } + +[target.'cfg(target_family = "wasm")'.dependencies] +libsecp256k1 = { version = "0.7.0" } + [dev-dependencies] rand_core = { workspace = true } diff --git a/stacks-common/src/util/secp256k1/libsepc256k1.rs b/stacks-common/src/util/secp256k1/libsepc256k1.rs new file mode 100644 index 00000000000..bea3c5e2d5d --- /dev/null +++ b/stacks-common/src/util/secp256k1/libsepc256k1.rs @@ -0,0 +1,323 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use ::libsecp256k1; +pub use ::libsecp256k1::Error; +use ::libsecp256k1::{ + Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, + RecoveryId as LibSecp256k1RecoveryId, SecretKey as LibSecp256k1PrivateKey, + Signature as LibSecp256k1Signature, +}; +use rand::RngCore; +use serde::de::{Deserialize, Error as de_Error}; +use serde::Serialize; + +use super::MessageSignature; +use crate::types::{PrivateKey, PublicKey}; +use crate::util::hash::{hex_bytes, to_hex}; + +pub const PUBLIC_KEY_SIZE: usize = 33; + +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct Secp256k1PublicKey { + // serde is broken for secp256k1, so do it ourselves + #[serde( + serialize_with = "secp256k1_pubkey_serialize", + deserialize_with = "secp256k1_pubkey_deserialize" + )] + key: LibSecp256k1PublicKey, + compressed: bool, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +pub struct Secp256k1PrivateKey { + // serde is broken for secp256k1, so do it ourselves + #[serde( + serialize_with = "secp256k1_privkey_serialize", + deserialize_with = "secp256k1_privkey_deserialize" + )] + key: LibSecp256k1PrivateKey, + compress_public: bool, +} + +impl Secp256k1PublicKey { + pub fn from_slice(data: &[u8]) -> Result { + let (format, compressed) = if data.len() == PUBLIC_KEY_SIZE { + (libsecp256k1::PublicKeyFormat::Compressed, true) + } else { + (libsecp256k1::PublicKeyFormat::Full, false) + }; + match LibSecp256k1PublicKey::parse_slice(data, Some(format)) { + Ok(pubkey_res) => Ok(Secp256k1PublicKey { + key: pubkey_res, + compressed, + }), + Err(_e) => Err("Invalid public key: failed to load"), + } + } + + pub fn to_hex(&self) -> String { + if self.compressed { + to_hex(&self.key.serialize_compressed().to_vec()) + } else { + to_hex(&self.key.serialize().to_vec()) + } + } + + pub fn to_bytes_compressed(&self) -> Vec { + self.key.serialize_compressed().to_vec() + } + + pub fn compressed(&self) -> bool { + self.compressed + } + + pub fn set_compressed(&mut self, value: bool) { + self.compressed = value; + } + + pub fn to_bytes(&self) -> Vec { + if self.compressed { + self.key.serialize_compressed().to_vec() + } else { + self.key.serialize().to_vec() + } + } + + pub fn from_hex(hex_string: &str) -> Result { + let data = hex_bytes(hex_string).map_err(|_e| "Failed to decode hex public key")?; + Secp256k1PublicKey::from_slice(&data[..]).map_err(|_e| "Invalid public key hex string") + } + + pub fn from_private(privk: &Secp256k1PrivateKey) -> Secp256k1PublicKey { + let key = LibSecp256k1PublicKey::from_secret_key(&privk.key); + Secp256k1PublicKey { + key, + compressed: privk.compress_public, + } + } + + /// recover message and signature to public key (will be compressed) + pub fn recover_to_pubkey( + msg: &[u8], + sig: &MessageSignature, + ) -> Result { + let secp256k1_sig = secp256k1_recover(msg, sig.as_bytes()) + .map_err(|_e| "Invalid signature: failed to recover public key")?; + + Secp256k1PublicKey::from_slice(&secp256k1_sig) + } +} + +impl Secp256k1PrivateKey { + pub fn new() -> Secp256k1PrivateKey { + let mut rng = rand::thread_rng(); + loop { + // keep trying to generate valid bytes + let mut random_32_bytes = [0u8; 32]; + rng.fill_bytes(&mut random_32_bytes); + let pk_res = LibSecp256k1PrivateKey::parse_slice(&random_32_bytes); + match pk_res { + Ok(pk) => { + return Secp256k1PrivateKey { + key: pk, + compress_public: true, + }; + } + Err(_) => { + continue; + } + } + } + } + + pub fn from_slice(data: &[u8]) -> Result { + if data.len() < 32 
{ + return Err("Invalid private key: shorter than 32 bytes"); + } + if data.len() > 33 { + return Err("Invalid private key: greater than 33 bytes"); + } + let compress_public = if data.len() == 33 { + // compressed byte tag? + if data[32] != 0x01 { + return Err("Invalid private key: invalid compressed byte marker"); + } + true + } else { + false + }; + + match LibSecp256k1PrivateKey::parse_slice(&data[0..32]) { + Ok(privkey_res) => Ok(Secp256k1PrivateKey { + key: privkey_res, + compress_public, + }), + Err(_e) => Err("Invalid private key: failed to load"), + } + } + + pub fn from_hex(hex_string: &str) -> Result { + let data = hex_bytes(hex_string).map_err(|_e| "Failed to decode hex private key")?; + Secp256k1PrivateKey::from_slice(&data[..]).map_err(|_e| "Invalid private key hex string") + } + + pub fn compress_public(&self) -> bool { + self.compress_public + } + + pub fn set_compress_public(&mut self, value: bool) { + self.compress_public = value; + } +} + +pub fn secp256k1_recover( + message_arr: &[u8], + serialized_signature: &[u8], +) -> Result<[u8; 33], LibSecp256k1Error> { + let recovery_id = libsecp256k1::RecoveryId::parse(serialized_signature[64] as u8)?; + let message = LibSecp256k1Message::parse_slice(message_arr)?; + let signature = LibSecp256k1Signature::parse_standard_slice(&serialized_signature[..64])?; + let recovered_pub_key = libsecp256k1::recover(&message, &signature, &recovery_id)?; + Ok(recovered_pub_key.serialize_compressed()) +} + +pub fn secp256k1_verify( + message_arr: &[u8], + serialized_signature: &[u8], + pubkey_arr: &[u8], +) -> Result<(), LibSecp256k1Error> { + let message = LibSecp256k1Message::parse_slice(message_arr)?; + let signature = LibSecp256k1Signature::parse_standard_slice(&serialized_signature[..64])?; // ignore 65th byte if present + let pubkey = LibSecp256k1PublicKey::parse_slice( + pubkey_arr, + Some(libsecp256k1::PublicKeyFormat::Compressed), + )?; + + let res = libsecp256k1::verify(&message, &signature, &pubkey); + if res { + Ok(()) + } else { + Err(LibSecp256k1Error::InvalidPublicKey) + } +} + +fn secp256k1_pubkey_serialize( + pubk: &LibSecp256k1PublicKey, + s: S, +) -> Result { + let key_hex = to_hex(&pubk.serialize().to_vec()); + s.serialize_str(&key_hex.as_str()) +} + +fn secp256k1_pubkey_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let key_hex = String::deserialize(d)?; + let key_bytes = hex_bytes(&key_hex).map_err(de_Error::custom)?; + + LibSecp256k1PublicKey::parse_slice(&key_bytes[..], None).map_err(de_Error::custom) +} + +fn secp256k1_privkey_serialize( + privk: &LibSecp256k1PrivateKey, + s: S, +) -> Result { + let key_hex = to_hex(&privk.serialize().to_vec()); + s.serialize_str(key_hex.as_str()) +} + +fn secp256k1_privkey_deserialize<'de, D: serde::Deserializer<'de>>( + d: D, +) -> Result { + let key_hex = String::deserialize(d)?; + let key_bytes = hex_bytes(&key_hex).map_err(de_Error::custom)?; + + LibSecp256k1PrivateKey::parse_slice(&key_bytes[..]).map_err(de_Error::custom) +} + +impl MessageSignature { + pub fn empty() -> MessageSignature { + // NOTE: this cannot be a valid signature + MessageSignature([0u8; 65]) + } + + #[cfg(test)] + // test method for generating place-holder data + pub fn from_raw(sig: &Vec) -> MessageSignature { + let mut buf = [0u8; 65]; + if sig.len() < 65 { + buf.copy_from_slice(&sig[..]); + } else { + buf.copy_from_slice(&sig[..65]); + } + MessageSignature(buf) + } + + pub fn from_secp256k1_recoverable( + sig: &LibSecp256k1Signature, + recid: LibSecp256k1RecoveryId, + ) -> 
MessageSignature { + let bytes = sig.serialize(); + let mut ret_bytes = [0u8; 65]; + let recovery_id_byte = recid.serialize(); // recovery ID will be 0, 1, 2, or 3 + ret_bytes[0] = recovery_id_byte; + ret_bytes[1..=64].copy_from_slice(&bytes[..64]); + MessageSignature(ret_bytes) + } + + pub fn to_secp256k1_recoverable( + &self, + ) -> Option<(LibSecp256k1Signature, LibSecp256k1RecoveryId)> { + let recovery_id = match LibSecp256k1RecoveryId::parse(self.0[0]) { + Ok(rid) => rid, + Err(_) => { + return None; + } + }; + let signature = LibSecp256k1Signature::parse_standard_slice(&self.0[1..65]).ok()?; + Some((signature, recovery_id)) + } +} + +impl PublicKey for Secp256k1PublicKey { + fn to_bytes(&self) -> Vec { + self.to_bytes() + } + + fn verify(&self, data_hash: &[u8], sig: &MessageSignature) -> Result { + let pub_key = Secp256k1PublicKey::recover_to_pubkey(data_hash, sig)?; + Ok(self.eq(&pub_key)) + } +} + +impl PrivateKey for Secp256k1PrivateKey { + fn to_bytes(&self) -> Vec { + let mut bits = self.key.serialize().to_vec(); + if self.compress_public { + bits.push(0x01); + } + bits + } + + fn sign(&self, data_hash: &[u8]) -> Result { + let message = LibSecp256k1Message::parse_slice(data_hash) + .map_err(|_e| "Invalid message: failed to decode data hash: must be a 32-byte hash")?; + let (sig, recid) = libsecp256k1::sign(&message, &self.key); + let rec_sig = MessageSignature::from_secp256k1_recoverable(&sig, recid); + Ok(rec_sig) + } +} diff --git a/stacks-common/src/util/secp256k1/mod.rs b/stacks-common/src/util/secp256k1/mod.rs new file mode 100644 index 00000000000..2241f5998cf --- /dev/null +++ b/stacks-common/src/util/secp256k1/mod.rs @@ -0,0 +1,33 @@ +#[cfg(not(target_family = "wasm"))] +mod secp256k1; + +#[cfg(not(target_family = "wasm"))] +pub use self::secp256k1::*; + +#[cfg(target_family = "wasm")] +mod libsecp256k1; + +#[cfg(target_family = "wasm")] +pub use self::libsecp256k1::*; + +pub const MESSAGE_SIGNATURE_ENCODED_SIZE: u32 = 65; + +pub struct MessageSignature(pub [u8; 65]); +impl_array_newtype!(MessageSignature, u8, 65); +impl_array_hexstring_fmt!(MessageSignature); +impl_byte_array_newtype!(MessageSignature, u8, 65); +impl_byte_array_serde!(MessageSignature); + +pub struct SchnorrSignature(pub [u8; 65]); +impl_array_newtype!(SchnorrSignature, u8, 65); +impl_array_hexstring_fmt!(SchnorrSignature); +impl_byte_array_newtype!(SchnorrSignature, u8, 65); +impl_byte_array_serde!(SchnorrSignature); +pub const SCHNORR_SIGNATURE_ENCODED_SIZE: u32 = 65; + +impl Default for SchnorrSignature { + /// Creates a default Schnorr Signature. Note this is not a valid signature. + fn default() -> Self { + Self([0u8; 65]) + } +} diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1/secp256k1.rs similarity index 98% rename from stacks-common/src/util/secp256k1.rs rename to stacks-common/src/util/secp256k1/secp256k1.rs index e33ce4f1549..f547f7825da 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1/secp256k1.rs @@ -13,22 +13,24 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use rand::RngCore; -use secp256k1; -use secp256k1::ecdsa::{ + +use ::secp256k1; +use ::secp256k1::ecdsa::{ RecoverableSignature as LibSecp256k1RecoverableSignature, RecoveryId as LibSecp256k1RecoveryID, Signature as LibSecp256k1Signature, }; -use secp256k1::{ +pub use ::secp256k1::Error; +use ::secp256k1::{ constants as LibSecp256k1Constants, Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; +use rand::RngCore; use serde::de::{Deserialize, Error as de_Error}; use serde::Serialize; -use super::hash::Sha256Sum; +use super::MessageSignature; use crate::types::{PrivateKey, PublicKey}; -use crate::util::hash::{hex_bytes, to_hex}; +use crate::util::hash::{hex_bytes, to_hex, Sha256Sum}; // per-thread Secp256k1 context thread_local!(static _secp256k1: Secp256k1 = Secp256k1::new()); @@ -55,13 +57,6 @@ pub struct Secp256k1PrivateKey { compress_public: bool, } -pub struct MessageSignature(pub [u8; 65]); -impl_array_newtype!(MessageSignature, u8, 65); -impl_array_hexstring_fmt!(MessageSignature); -impl_byte_array_newtype!(MessageSignature, u8, 65); -impl_byte_array_serde!(MessageSignature); -pub const MESSAGE_SIGNATURE_ENCODED_SIZE: u32 = 65; - impl MessageSignature { pub fn empty() -> MessageSignature { // NOTE: this cannot be a valid signature From e9cc50c1cbe916e6bb3882094d53e3f99b99cb6a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 10 Mar 2025 10:34:04 -0400 Subject: [PATCH 092/238] chore: remove candidate cache --- stackslib/src/core/mempool.rs | 67 ----------------------------------- 1 file changed, 67 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 0f6e5e014be..cbc40092518 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1053,73 +1053,6 @@ pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_ Ok(ret) } -/// Cache potential candidate transactions for subsequent iterations. -/// While walking the mempool, transactions that have nonces that are too high -/// to process yet (but could be processed in the future) are added to `next`. -/// In the next pass, `next` is moved to `cache` and these transactions are -/// checked before reading more from the mempool DB. -struct CandidateCache { - cache: VecDeque, - next: VecDeque, - /// The maximum size that this cache can be. - max_cache_size: usize, -} - -impl CandidateCache { - fn new(candidate_retry_cache_size: usize) -> Self { - Self { - cache: VecDeque::new(), - next: VecDeque::new(), - max_cache_size: candidate_retry_cache_size, - } - } - - /// Retrieve the next candidate transaction from the cache. - fn next(&mut self) -> Option { - self.cache.pop_front() - } - - /// Push a candidate to the cache for the next iteration. - fn push(&mut self, tx: MemPoolTxInfoPartial) { - if self.next.len() < self.max_cache_size { - self.next.push_back(tx); - } - - #[cfg(test)] - assert!(self.cache.len() + self.next.len() <= self.max_cache_size); - } - - /// Prepare for the next iteration, transferring transactions from `next` to `cache`. - fn reset(&mut self) { - // We do not need a size check here, because the cache can only grow in size - // after `cache` is empty. New transactions are not walked until the entire - // cache has been walked, so whenever we are adding brand new transactions to - // the cache, `cache` must, by definition, be empty. The size of `next` - // can grow beyond the previous iteration's cache, and that is limited inside - // the `push` method. 
- self.next.append(&mut self.cache); - self.cache = std::mem::take(&mut self.next); - - #[cfg(test)] - { - assert!(self.cache.len() <= self.max_cache_size + 1); - assert!(self.next.len() <= self.max_cache_size + 1); - } - } - - /// Total length of the cache. - #[cfg_attr(test, mutants::skip)] - fn len(&self) -> usize { - self.cache.len() + self.next.len() - } - - /// Is the cache empty? - #[cfg_attr(test, mutants::skip)] - fn is_empty(&self) -> bool { - self.cache.is_empty() && self.next.is_empty() - } -} - /// Evaluates the pair of nonces, to determine an order /// /// Returns: From d120ab58e28c36f971581c4b2bedb8f89a49e745 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 10 Mar 2025 15:44:43 +0100 Subject: [PATCH 093/238] refactor: rename secp256k1 modules --- stacks-common/src/util/secp256k1/mod.rs | 8 ++++---- .../src/util/secp256k1/{secp256k1.rs => native.rs} | 0 .../src/util/secp256k1/{libsepc256k1.rs => wasm.rs} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename stacks-common/src/util/secp256k1/{secp256k1.rs => native.rs} (100%) rename stacks-common/src/util/secp256k1/{libsepc256k1.rs => wasm.rs} (100%) diff --git a/stacks-common/src/util/secp256k1/mod.rs b/stacks-common/src/util/secp256k1/mod.rs index 2241f5998cf..50ee281e306 100644 --- a/stacks-common/src/util/secp256k1/mod.rs +++ b/stacks-common/src/util/secp256k1/mod.rs @@ -1,14 +1,14 @@ #[cfg(not(target_family = "wasm"))] -mod secp256k1; +mod native; #[cfg(not(target_family = "wasm"))] -pub use self::secp256k1::*; +pub use self::native::*; #[cfg(target_family = "wasm")] -mod libsecp256k1; +mod wasm; #[cfg(target_family = "wasm")] -pub use self::libsecp256k1::*; +pub use self::wasm::*; pub const MESSAGE_SIGNATURE_ENCODED_SIZE: u32 = 65; diff --git a/stacks-common/src/util/secp256k1/secp256k1.rs b/stacks-common/src/util/secp256k1/native.rs similarity index 100% rename from stacks-common/src/util/secp256k1/secp256k1.rs rename to stacks-common/src/util/secp256k1/native.rs diff --git a/stacks-common/src/util/secp256k1/libsepc256k1.rs b/stacks-common/src/util/secp256k1/wasm.rs similarity index 100% rename from stacks-common/src/util/secp256k1/libsepc256k1.rs rename to stacks-common/src/util/secp256k1/wasm.rs From 4777e516e4b99c7265e32378e61a69e124ba8373 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Mon, 10 Mar 2025 15:50:18 +0100 Subject: [PATCH 094/238] refactor: remove stack-common/default from clarity --- clarity/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index b217972938f..9077834a703 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -27,7 +27,7 @@ regex = "1" lazy_static = "1.4.0" integer-sqrt = "0.1.3" slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common" } +stacks_common = { package = "stacks-common", path = "../stacks-common", default-features = false } rstest = "0.17.0" rstest_reuse = "0.5.0" hashbrown = { workspace = true } From 7575b634f51402fef1a4f0844bd9ceed024937d4 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 10 Mar 2025 12:53:31 -0400 Subject: [PATCH 095/238] chore: make it so non-Debian systems can run the block-replay script --- contrib/tools/block-replay.sh | 95 +++++++++++++++++++++++++++-------- 1 file changed, 75 insertions(+), 20 deletions(-) diff --git a/contrib/tools/block-replay.sh b/contrib/tools/block-replay.sh index 
4fb4a6a55b9..41e04d25d5b 100755 --- a/contrib/tools/block-replay.sh +++ b/contrib/tools/block-replay.sh @@ -16,18 +16,19 @@ set -o pipefail ## for 20 slices, this is about 1.8TB NETWORK="mainnet" ## network to replay -REPO_DIR="$HOME/stacks-inspect" ## where to build the source +REPO_DIR="$HOME/stacks-core" ## where to build the source REMOTE_REPO="stacks-network/stacks-core" ## remote git repo to build stacks-inspect from SCRATCH_DIR="$HOME/scratch" ## root folder for the replay slices TIMESTAMP=$(date +%Y-%m-%d-%s) ## use a simple date format year-month-day-epoch -LOG_DIR="/tmp/replay_${TIMESTAMP}" ## location of logfiles for the replay +LOG_DIR="$HOME/replay_${TIMESTAMP}" ## location of logfiles for the replay SLICE_DIR="${SCRATCH_DIR}/slice" ## location of slice dirs TMUX_SESSION="replay" ## tmux session name to run the replay TERM_OUT=false ## terminal friendly output TESTING=false ## only run a replay on a few thousand blocks BRANCH="develop" ## default branch to build stacks-inspect from CORES=$(grep -c processor /proc/cpuinfo) ## retrieve total number of CORES on the system -RESERVED=10 ## reserve this many CORES for other processes as default +RESERVED=8 ## reserve this many CORES for other processes as default +LOCAL_CHAINSTATE= ## path to local chainstate to use instead of snapshot download ## ansi color codes for terminal output COLRED=$'\033[31m' ## Red @@ -94,19 +95,25 @@ configure_replay_slices() { echo "${COLRED}Error${COLRESET} creating dir ${SLICE_DIR}" exit 1 } - echo "Downloading latest ${NETWORK} chainstate archive ${COLYELLOW}https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" - ## curl had some random issues retrying the download when network issues arose. wget has resumed more consistently, so we'll use that binary - # curl -L --proto '=https' --tlsv1.2 https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz -o ${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz || { - wget -O "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" "https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz" || { - echo "${COLRED}Error${COLRESET} downlaoding latest ${NETWORK} chainstate archive" - exit 1 - } - ## extract downloaded archive - echo "Extracting downloaded archive: ${COLYELLOW}${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" - tar --strip-components=1 -xzf "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" -C "${SLICE_DIR}0" || { - echo "${COLRED}Error${COLRESET} extracting ${NETWORK} chainstate archive" - exit - } + + if [[ -n "${LOCAL_CHAINSTATE}" ]]; then + echo "Copying local chainstate '${LOCAL_CHAINSTATE}'" + cp -r "${LOCAL_CHAINSTATE}"/* "${SLICE_DIR}0" + else + echo "Downloading latest ${NETWORK} chainstate archive ${COLYELLOW}https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}" + ## curl had some random issues retrying the download when network issues arose. 
wget has resumed more consistently, so we'll use that binary
+        # curl -L --proto '=https' --tlsv1.2 https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz -o ${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz || {
+        wget -O "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" "https://archive.hiro.so/${NETWORK}/stacks-blockchain/${NETWORK}-stacks-blockchain-latest.tar.gz" || {
+            echo "${COLRED}Error${COLRESET} downloading latest ${NETWORK} chainstate archive"
+            exit 1
+        }
+        ## extract downloaded archive
+        echo "Extracting downloaded archive: ${COLYELLOW}${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz${COLRESET}"
+        tar --strip-components=1 -xzf "${SCRATCH_DIR}/${NETWORK}-stacks-blockchain-latest.tar.gz" -C "${SLICE_DIR}0" || {
+            echo "${COLRED}Error${COLRESET} extracting ${NETWORK} chainstate archive"
+            exit
+        }
+    fi
     echo "Moving marf database: ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs -> ${COLYELLOW}${SCRATCH_DIR}/marf.sqlite.blobs${COLRESET}"
     mv "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs "${SCRATCH_DIR}"/
     echo "Symlinking marf database: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${COLYELLOW}${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs${COLRESET}"
     ln -s "${SCRATCH_DIR}"/marf.sqlite.blobs "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs || {
         echo "${COLRED}Error${COLRESET} creating symlink: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs"
         exit 1
     }
@@ -377,6 +384,8 @@ usage() {
     echo "  ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output"
     echo "  ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)"
     echo "  ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)"
+    echo "  ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstate snapshot"
+    echo "  ${COLYELLOW}-l|--logdir${COLRESET}: use existing log directory"
    echo "  ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks"
    echo
    echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}"
@@ -386,9 +395,30 @@
 
 ## install missing dependencies
-for cmd in curl tmux git wget tar gzip grep cargo pgrep; do
+HAS_APT=1
+HAS_SUDO=1
+for cmd in apt-get sudo curl tmux git wget tar gzip grep cargo pgrep tput find; do
+    # in Alpine, `find` might be linked to `busybox` and won't work
+    if [ "${cmd}" == "find" ] && [ -L "$(command -v "${cmd}")" ]; then
+        rp="$(readlink "$(command -v "${cmd}" || echo "NOTLINK")")"
+        if [ "${rp}" == "/bin/busybox" ]; then
+            echo "${COLRED}ERROR${COLRESET} Busybox 'find' is not supported. Please install 'findutils' or similar."
+ exit 1 + fi + fi + command -v "${cmd}" >/dev/null 2>&1 || { case "${cmd}" in + "apt-get") + echo "${COLYELLOW}WARN${COLRESET} 'apt-get' not found; automatic package installation will fail" + HAS_APT=0 + continue + ;; + "sudo") + echo "${COLYELLOW}WARN${COLRESET} 'sudo' not found; automatic package installation will fail" + HAS_SUDO=0 + continue + ;; "cargo") install_cargo ;; @@ -399,6 +429,11 @@ for cmd in curl tmux git wget tar gzip grep cargo pgrep; do package="${cmd}" ;; esac + + if [[ ${HAS_APT} = 0 ]] || [[ ${HAS_SUDO} = 0 ]]; then + echo "${COLRED}Error${COLRESET} Missing command '${cmd}'" + exit 1 + fi (sudo apt-get update && sudo apt-get install "${package}") || { echo "${COLRED}Error${COLRESET} installing $package" exit 1 @@ -422,6 +457,7 @@ while [ ${#} -gt 0 ]; do # required if not mainnet if [ "${2}" == "" ]; then echo "Missing required value for ${1}" + exit 1 fi NETWORK=${2} shift @@ -430,10 +466,29 @@ while [ ${#} -gt 0 ]; do # build from specific branch if [ "${2}" == "" ]; then echo "Missing required value for ${1}" + exit 1 fi BRANCH=${2} shift - ;; + ;; + -c|--chainstate) + # use a local chainstate + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + LOCAL_CHAINSTATE="${2}" + shift + ;; + -l|--logdir) + # use a given logdir + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + exit 1 + fi + LOG_DIR="${2}" + shift + ;; -r|--RESERVED) # reserve this many cpus for the system (default is 10) if [ "${2}" == "" ]; then @@ -458,8 +513,8 @@ done ## clear display before starting tput reset echo "Replay Started: ${COLYELLOW}$(date)${COLRESET}" -build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) -configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) +build_stacks_inspect ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) +configure_replay_slices ## comment if using an existing chainstate/slice dir (ex: replay was performed already, and a second run is desired) setup_replay ## configure logdir and tmux sessions start_replay ## replay pre-nakamoto blocks (2.x) start_replay nakamoto ## replay nakamoto blocks From 14a2ae50ba69c369fc2d55f151137efaeb65eb4f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 10 Mar 2025 12:56:22 -0400 Subject: [PATCH 096/238] chore: tabs to spaces --- contrib/tools/block-replay.sh | 713 +++++++++++++++++----------------- 1 file changed, 357 insertions(+), 356 deletions(-) diff --git a/contrib/tools/block-replay.sh b/contrib/tools/block-replay.sh index 41e04d25d5b..0b6259c73dd 100755 --- a/contrib/tools/block-replay.sh +++ b/contrib/tools/block-replay.sh @@ -40,61 +40,61 @@ COLRESET=$'\033[0m' ## reset color/formatting ## verify that cargo is installed in the expected path, not only $PATH install_cargo() { - command -v "$HOME/.cargo/bin/cargo" >/dev/null 2>&1 || { - echo "Installing Rust via rustup" - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y || { - echo "${COLRED}Error${COLRESET} installing Rust" - exit 1 - } - } - echo "Exporting $HOME/.cargo/env" - # shellcheck source=/dev/null - source "$HOME/.cargo/env" - return 0 + command -v "$HOME/.cargo/bin/cargo" >/dev/null 2>&1 || { + echo "Installing Rust via rustup" + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y || { + echo "${COLRED}Error${COLRESET} installing Rust" 
+ exit 1 + } + } + echo "Exporting $HOME/.cargo/env" + # shellcheck source=/dev/null + source "$HOME/.cargo/env" + return 0 } ## build stacks-inspect binary from specified repo/branch build_stacks_inspect() { - if [ -d "${REPO_DIR}" ];then - echo "Found ${COLYELLOW}${REPO_DIR}${COLRESET}. checking out ${COLGREEN}${BRANCH}${COLRESET} and resetting to ${COLBOLD}HEAD${COLRESET}" - cd "${REPO_DIR}" && git fetch - echo "Checking out ${BRANCH} and resetting to HEAD" - git stash ## stash any local changes to prevent checking out $BRANCH - (git checkout "${BRANCH}" && git reset --hard HEAD) || { - echo "${COLRED}Error${COLRESET} checking out ${BRANCH}" - exit 1 - } - else - echo "Cloning stacks-core ${BRANCH}" - (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { - echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" - exit 1 - } - fi - git pull - ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect - echo "Building stacks-inspect binary" - cargo build --bin=stacks-inspect --release || { - echo "${COLRED}Error${COLRESET} building stacks-inspect binary" - exit 1 - } - echo "Done building. continuing" + if [ -d "${REPO_DIR}" ];then + echo "Found ${COLYELLOW}${REPO_DIR}${COLRESET}. checking out ${COLGREEN}${BRANCH}${COLRESET} and resetting to ${COLBOLD}HEAD${COLRESET}" + cd "${REPO_DIR}" && git fetch + echo "Checking out ${BRANCH} and resetting to HEAD" + git stash ## stash any local changes to prevent checking out $BRANCH + (git checkout "${BRANCH}" && git reset --hard HEAD) || { + echo "${COLRED}Error${COLRESET} checking out ${BRANCH}" + exit 1 + } + else + echo "Cloning stacks-core ${BRANCH}" + (git clone "https://github.com/${REMOTE_REPO}" --branch "${BRANCH}" "${REPO_DIR}" && cd "${REPO_DIR}") || { + echo "${COLRED}Error${COLRESET} cloning https://github.com/${REMOTE_REPO} into ${REPO_DIR}" + exit 1 + } + fi + git pull + ## build stacks-inspect to: $HOME/stacks-inspect/target/release/stacks-inspect + echo "Building stacks-inspect binary" + cargo build --bin=stacks-inspect --release || { + echo "${COLRED}Error${COLRESET} building stacks-inspect binary" + exit 1 + } + echo "Done building. 
continuing" } ## create the slice dirs from an chainstate archive (symlinking marf.sqlite.blobs), 1 dir per CPU configure_replay_slices() { - if [ -d "$HOME/scratch" ]; then - echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" - rm -rf "${HOME}/scratch" || { - echo "${COLRED}Error${COLRESET} deleting dir $HOME/scratch" - exit 1 - } - fi - echo "Creating scratch and slice dirs" - (mkdir -p "${SLICE_DIR}0" && cd "${SCRATCH_DIR}") || { - echo "${COLRED}Error${COLRESET} creating dir ${SLICE_DIR}" - exit 1 - } + if [ -d "$HOME/scratch" ]; then + echo "Deleting existing scratch dir: ${COLYELLOW}$HOME/scratch${COLRESET}" + rm -rf "${HOME}/scratch" || { + echo "${COLRED}Error${COLRESET} deleting dir $HOME/scratch" + exit 1 + } + fi + echo "Creating scratch and slice dirs" + (mkdir -p "${SLICE_DIR}0" && cd "${SCRATCH_DIR}") || { + echo "${COLRED}Error${COLRESET} creating dir ${SLICE_DIR}" + exit 1 + } if [[ -n "${LOCAL_CHAINSTATE}" ]]; then echo "Copying local chainstate '${LOCAL_CHAINSTATE}'" @@ -114,283 +114,283 @@ configure_replay_slices() { exit } fi - echo "Moving marf database: ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs -> ${COLYELLOW}${SCRATCH_DIR}/marf.sqlite.blobs${COLRESET}" - mv "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs "${SCRATCH_DIR}"/ - echo "Symlinking marf database: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${COLYELLOW}${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs${COLRESET}" - ln -s "${SCRATCH_DIR}"/marf.sqlite.blobs "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs || { - echo "${COLRED}Error${COLRESET} creating symlink: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs" - exit 1 - } + echo "Moving marf database: ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs -> ${COLYELLOW}${SCRATCH_DIR}/marf.sqlite.blobs${COLRESET}" + mv "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs "${SCRATCH_DIR}"/ + echo "Symlinking marf database: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${COLYELLOW}${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs${COLRESET}" + ln -s "${SCRATCH_DIR}"/marf.sqlite.blobs "${SLICE_DIR}"0/chainstate/vm/clarity/marf.sqlite.blobs || { + echo "${COLRED}Error${COLRESET} creating symlink: ${SCRATCH_DIR}/marf.sqlite.blobs -> ${SLICE_DIR}0/chainstate/vm/clarity/marf.sqlite.blobs" + exit 1 + } - ## create a copy of the linked db with - ## decrement by 1 since we already have ${SLICE_DIR}0 - for ((i=1;i<=$(( CORES - RESERVED - 1));i++)); do - echo "Copying ${SLICE_DIR}0 -> ${COLYELLOW}${SLICE_DIR}${i}${COLRESET}" - cp -R "${SLICE_DIR}0" "${SLICE_DIR}${i}" || { - echo "${COLRED}Error${COLRESET} copying ${SLICE_DIR}0 -> ${SLICE_DIR}${i}" - exit 1 - } - done + ## create a copy of the linked db with + ## decrement by 1 since we already have ${SLICE_DIR}0 + for ((i=1;i<=$(( CORES - RESERVED - 1));i++)); do + echo "Copying ${SLICE_DIR}0 -> ${COLYELLOW}${SLICE_DIR}${i}${COLRESET}" + cp -R "${SLICE_DIR}0" "${SLICE_DIR}${i}" || { + echo "${COLRED}Error${COLRESET} copying ${SLICE_DIR}0 -> ${SLICE_DIR}${i}" + exit 1 + } + done } ## setup the tmux sessions and create the logdir for storing output setup_replay() { - ## if there is an existing folder, rm it - if [ -d "${LOG_DIR}" ];then - echo "Removing logdir ${LOG_DIR}" - rm -rf "${LOG_DIR}" - fi - ## create LOG_DIR to store output files - if [ ! 
-d "${LOG_DIR}" ]; then - echo "Creating logdir ${LOG_DIR}" - mkdir -p "${LOG_DIR}" - fi - ## if tmux session "replay" exists, kill it and start anew - if eval "tmux list-windows -t ${TMUX_SESSION} &> /dev/null"; then - echo "Killing existing tmux session: ${TMUX_SESSION}" - eval "tmux kill-session -t ${TMUX_SESSION} &> /dev/null" - fi - local slice_counter=0 + ## if there is an existing folder, rm it + if [ -d "${LOG_DIR}" ];then + echo "Removing logdir ${LOG_DIR}" + rm -rf "${LOG_DIR}" + fi + ## create LOG_DIR to store output files + if [ ! -d "${LOG_DIR}" ]; then + echo "Creating logdir ${LOG_DIR}" + mkdir -p "${LOG_DIR}" + fi + ## if tmux session "replay" exists, kill it and start anew + if eval "tmux list-windows -t ${TMUX_SESSION} &> /dev/null"; then + echo "Killing existing tmux session: ${TMUX_SESSION}" + eval "tmux kill-session -t ${TMUX_SESSION} &> /dev/null" + fi + local slice_counter=0 - ## create tmux session named ${TMUX_SESSION} with a window named slice0 - tmux new-session -d -s ${TMUX_SESSION} -n slice${slice_counter} || { - echo "${COLRED}Error${COLRESET} creating tmux session ${COLYELLOW}${TMUX_SESSION}${COLRESET}" - exit 1 - } + ## create tmux session named ${TMUX_SESSION} with a window named slice0 + tmux new-session -d -s ${TMUX_SESSION} -n slice${slice_counter} || { + echo "${COLRED}Error${COLRESET} creating tmux session ${COLYELLOW}${TMUX_SESSION}${COLRESET}" + exit 1 + } - if [ ! -f "${SLICE_DIR}0/chainstate/vm/index.sqlite" ]; then - echo "${COLRED}Error${COLRESET}: chainstate db not found (${SLICE_DIR}0/chainstate/vm/index.sqlite)" - exit 1 - fi - return 0 + if [ ! -f "${SLICE_DIR}0/chainstate/vm/index.sqlite" ]; then + echo "${COLRED}Error${COLRESET}: chainstate db not found (${SLICE_DIR}0/chainstate/vm/index.sqlite)" + exit 1 + fi + return 0 } ## run the block replay start_replay() { - local mode=$1 - local total_blocks=0 - local starting_block=0 - local inspect_command - local slice_counter=0 - case "$mode" in - nakamoto) - ## nakamoto blocks - echo "Mode: ${COLYELLOW}${mode}${COLRESET}" - local log_append="_${mode}" - inspect_command="replay-naka-block" - ## get the total number of nakamoto blocks in db - total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) - starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) - ${TESTING} && total_blocks=301883 - ${TESTING} && starting_block=300883 - ;; - *) - ## pre-nakamoto blocks - echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" - local log_append="" - inspect_command="replay-block" - ## get the total number of blocks (with orphans) in db - total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) - starting_block=0 # for the block counter, start at this block - ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 - ${TESTING} && total_blocks=153000 - ${TESTING} && starting_block=15200 - ;; - esac - local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed - local slices=$((CORES - RESERVED)) ## how many replay slices to use - local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice - ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" - echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" - echo "Staring Block: ${COLYELLOW}$starting_block${COLRESET}" - echo "Block 
diff: ${COLYELLOW}$block_diff${COLRESET}" - echo "******************************************************" - echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" - echo "Blocks per slice: ${COLYELLOW}${slice_blocks}${COLRESET}" - local end_block_count=$starting_block - while [[ ${end_block_count} -lt ${total_blocks} ]]; do - local start_block_count=$end_block_count - end_block_count=$((end_block_count + slice_blocks)) - if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices - 1)) ]]; then - end_block_count="${total_blocks}" - fi - if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes - if [ "${slice_counter}" -gt 0 ];then - tmux new-window -t replay -d -n "slice${slice_counter}" || { - echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - fi - fi - local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" - local log=" | tee -a ${log_file}" - local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" - echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" - echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice - echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" - ## send `cmd` to the tmux window where the replay will run - tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { - echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - ## log the return code as the last line - tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "echo \${PIPESTATUS[0]} >> ${log_file}" Enter || { - echo "${COLRED}Error${COLRESET} sending return status command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" - exit 1 - } - slice_counter=$((slice_counter + 1)) - done - check_progress + local mode=$1 + local total_blocks=0 + local starting_block=0 + local inspect_command + local slice_counter=0 + case "$mode" in + nakamoto) + ## nakamoto blocks + echo "Mode: ${COLYELLOW}${mode}${COLRESET}" + local log_append="_${mode}" + inspect_command="replay-naka-block" + ## get the total number of nakamoto blocks in db + total_blocks=$(echo "select count(*) from nakamoto_block_headers" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) + starting_block=0 # for the block counter, start at this block + ## use these values if `--testing` arg is provided (only replay 1_000 blocks) + ${TESTING} && total_blocks=301883 + ${TESTING} && starting_block=300883 + ;; + *) + ## pre-nakamoto blocks + echo "Mode: ${COLYELLOW}pre-nakamoto${COLRESET}" + local log_append="" + inspect_command="replay-block" + ## get the total number of blocks (with orphans) in db + total_blocks=$(echo "select count(*) from staging_blocks where orphaned = 0" | sqlite3 "${SLICE_DIR}"0/chainstate/vm/index.sqlite) + starting_block=0 # for the block counter, start at this block + ## use these values if `--testing` arg is provided (only replay 1_000 blocks) Note: 2.5 epoch is at 153106 + ${TESTING} && 
total_blocks=153000 + ${TESTING} && starting_block=15200 + ;; + esac + local block_diff=$((total_blocks - starting_block)) ## how many blocks are being replayed + local slices=$((CORES - RESERVED)) ## how many replay slices to use + local slice_blocks=$((block_diff / slices)) ## how many blocks to replay per slice + ${TESTING} && echo "${COLRED}Testing: ${TESTING}${COLRESET}" + echo "Total blocks: ${COLYELLOW}${total_blocks}${COLRESET}" + echo "Starting Block: ${COLYELLOW}$starting_block${COLRESET}" + echo "Block diff: ${COLYELLOW}$block_diff${COLRESET}" + echo "******************************************************" + echo "Total slices: ${COLYELLOW}${slices}${COLRESET}" + echo "Blocks per slice: ${COLYELLOW}${slice_blocks}${COLRESET}" + local end_block_count=$starting_block + while [[ ${end_block_count} -lt ${total_blocks} ]]; do + local start_block_count=$end_block_count + end_block_count=$((end_block_count + slice_blocks)) + if [[ "${end_block_count}" -gt "${total_blocks}" ]] || [[ "${slice_counter}" -eq $((slices - 1)) ]]; then + end_block_count="${total_blocks}" + fi + if [ "${mode}" != "nakamoto" ]; then ## don't create the tmux windows if we're replaying nakamoto blocks (they should already exist). TODO: check if it does exist in case the function call order changes + if [ "${slice_counter}" -gt 0 ];then + tmux new-window -t replay -d -n "slice${slice_counter}" || { + echo "${COLRED}Error${COLRESET} creating tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + fi + fi + local log_file="${LOG_DIR}/slice${slice_counter}${log_append}.log" + local log=" | tee -a ${log_file}" + local cmd="${REPO_DIR}/target/release/stacks-inspect --config ${REPO_DIR}/stackslib/conf/${NETWORK}-follower-conf.toml ${inspect_command} ${SLICE_DIR}${slice_counter} index-range $start_block_count $end_block_count 2>/dev/null" + echo " Creating tmux window: ${COLGREEN}replay:slice${slice_counter}${COLRESET} :: Blocks: ${COLYELLOW}${start_block_count}-${end_block_count}${COLRESET} || Logging to: ${log_file}" + echo "Command: ${cmd}" > "${log_file}" ## log the command being run for the slice + echo "Replaying indexed blocks: ${start_block_count}-${end_block_count} (out of ${total_blocks})" >> "${log_file}" + ## send `cmd` to the tmux window where the replay will run + tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "${cmd}${log}" Enter || { + echo "${COLRED}Error${COLRESET} sending replay command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + ## log the return code as the last line + tmux send-keys -t "${TMUX_SESSION}:slice${slice_counter}" "echo \${PIPESTATUS[0]} >> ${log_file}" Enter || { + echo "${COLRED}Error${COLRESET} sending return status command to tmux window ${COLYELLOW}slice${slice_counter}${COLRESET}" + exit 1 + } + slice_counter=$((slice_counter + 1)) + done + check_progress } ## pretty print the status output (simple spinner while pids are active) check_progress() { - # give the pids a few seconds to show up in process table before checking if they're running - local sleep_duration=5 - local progress=1 - local sp="/-\|" - local count - while [ $sleep_duration -gt 0 ]; do - ${TERM_OUT} && printf "Sleeping ...
\b [ %s%s%s ] \033[0K\r" "${COLYELLOW}" "${sleep_duration}" "${COLRESET}" - sleep_duration=$((sleep_duration-1)) - sleep 1 - done - echo "************************************************************************" - echo "Checking Block Replay status" - echo -e ' ' - while true; do - count=$(pgrep -c "stacks-inspect") - if [ "${count}" -gt 0 ]; then - ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" - else - ${TERM_OUT} && printf "\r\n" - break - fi - done - echo "************************************************************************" + # give the pids a few seconds to show up in process table before checking if they're running + local sleep_duration=5 + local progress=1 + local sp="/-\|" + local count + while [ $sleep_duration -gt 0 ]; do + ${TERM_OUT} && printf "Sleeping ... \b [ %s%s%s ] \033[0K\r" "${COLYELLOW}" "${sleep_duration}" "${COLRESET}" + sleep_duration=$((sleep_duration-1)) + sleep 1 + done + echo "************************************************************************" + echo "Checking Block Replay status" + echo -e ' ' + while true; do + count=$(pgrep -c "stacks-inspect") + if [ "${count}" -gt 0 ]; then + ${TERM_OUT} && printf "Block replay processes are currently active [ %s%s%s%s ] ... \b${sp:progress++%${#sp}:1} \033[0K\r" "${COLYELLOW}" "${COLBOLD}" "${count}" "${COLRESET}" + else + ${TERM_OUT} && printf "\r\n" + break + fi + done + echo "************************************************************************" } ## store the results in an aggregated logfile and an html file store_results() { - ## text file to store results - local results="${LOG_DIR}/results.log" - ## html file to store results - local results_html="${LOG_DIR}/results.html" - local failed=0; - local return_code=0; - local failure_count - echo "Results: ${COLYELLOW}${results}${COLRESET}" - cd "${LOG_DIR}" || { - echo "${COLRED}Error${COLRESET} Logdir ${COLYELLOW}${LOG_DIR}${COLRESET} doesn't exist" - exit 1 - } - ## retrieve the count of all lines with `Failed processing block` - failure_count=$(grep -rc "Failed processing block" slice*.log | awk -F: '$NF >= 0 {x+=$NF; $NF=""} END{print x}') - if [ "${failure_count}" -gt 0 ]; then - echo "Failures: ${COLRED}${failure_count}${COLRESET}" - else - echo "Failures: ${COLGREEN}${failure_count}${COLRESET}" - fi - echo "Failures: ${failure_count}" > "${results}" - ## check the return codes to see if we had a panic - for file in $(find . 
-name "slice*.log" -printf '%P\n' | sort); do - # for file in $(ls slice*.log | sort); do - echo "Checking file: ${COLYELLOW}$file${COLRESET}" - return_code=$(tail -1 "${file}") - case ${return_code} in - 0) - # block replay ran successfully - ;; - 1) - # block replay had some block failures - failed=1 - ;; - *) - # return code likely indicates a panic - failed=1 - echo "$file return code: $return_code" >> "${results}" # ok to continue if this write fails - ;; - esac - done + ## text file to store results + local results="${LOG_DIR}/results.log" + ## html file to store results + local results_html="${LOG_DIR}/results.html" + local failed=0; + local return_code=0; + local failure_count + echo "Results: ${COLYELLOW}${results}${COLRESET}" + cd "${LOG_DIR}" || { + echo "${COLRED}Error${COLRESET} Logdir ${COLYELLOW}${LOG_DIR}${COLRESET} doesn't exist" + exit 1 + } + ## retrieve the count of all lines with `Failed processing block` + failure_count=$(grep -rc "Failed processing block" slice*.log | awk -F: '$NF >= 0 {x+=$NF; $NF=""} END{print x}') + if [ "${failure_count}" -gt 0 ]; then + echo "Failures: ${COLRED}${failure_count}${COLRESET}" + else + echo "Failures: ${COLGREEN}${failure_count}${COLRESET}" + fi + echo "Failures: ${failure_count}" > "${results}" + ## check the return codes to see if we had a panic + for file in $(find . -name "slice*.log" -printf '%P\n' | sort); do + # for file in $(ls slice*.log | sort); do + echo "Checking file: ${COLYELLOW}$file${COLRESET}" + return_code=$(tail -1 "${file}") + case ${return_code} in + 0) + # block replay ran successfully + ;; + 1) + # block replay had some block failures + failed=1 + ;; + *) + # return code likely indicates a panic + failed=1 + echo "$file return code: $return_code" >> "${results}" # ok to continue if this write fails + ;; + esac + done - ## Store the results as HTML: - cat <<- _EOF_ > "${results_html}" - - -

$(date -u)

-
-

Failures: ${failure_count}

-
- _EOF_ + ## Store the results as HTML: + cat <<- _EOF_ > "${results_html}" + + +

$(date -u)

+
+

Failures: ${failure_count}

+
+_EOF_ - ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful - if [ ${failed} == "1" ];then - output=$(grep -r -h "Failed processing block" slice*.log) - IFS=$'\n' - for line in ${output}; do - echo "
${line}
" >> "${results_html}" || { - echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" - } - echo "${line}" >> "${results}" || { - echo "${COLRED}Error${COLRESET} writing failure to: ${results}" - } - done - else - echo "
Test Passed
" >> "${results_html}" - fi - echo "
" >> "${results_html}" - echo "" >> "${results_html}" + ## use the $failed var here in case there is a panic, then $failure_count may show zero, but the replay was not successful + if [ ${failed} == "1" ];then + output=$(grep -r -h "Failed processing block" slice*.log) + IFS=$'\n' + for line in ${output}; do + echo "
${line}
" >> "${results_html}" || { + echo "${COLRED}Error${COLRESET} writing failure to: ${results_html}" + } + echo "${line}" >> "${results}" || { + echo "${COLRED}Error${COLRESET} writing failure to: ${results}" + } + done + else + echo "
Test Passed
" >> "${results_html}" + fi + echo "
" >> "${results_html}" + echo "" >> "${results_html}" } ## show usage and exit usage() { - echo - echo "Usage:" - echo " ${COLBOLD}${0}${COLRESET}" - echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" - echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" - echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" - echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" - echo " ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstaet snapshot" + echo + echo "Usage:" + echo " ${COLBOLD}${0}${COLRESET}" + echo " ${COLYELLOW}--testing${COLRESET}: only check a small number of blocks" + echo " ${COLYELLOW}-t|--terminal${COLRESET}: more terminal friendly output" + echo " ${COLYELLOW}-n|--network${COLRESET}: run block replay against specific network (default: mainnet)" + echo " ${COLYELLOW}-b|--branch${COLRESET}: branch of stacks-core to build stacks-inspect from (default: develop)" + echo " ${COLYELLOW}-c|--chainstate${COLRESET}: local chainstate copy to use instead of downloading a chainstaet snapshot" echo " ${COLYELLOW}-l|--logdir${COLRESET}: use existing log directory" - echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" - echo - echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" - echo - exit 0 + echo " ${COLYELLOW}-r|--reserved${COLRESET}: how many cpu cores to reserve for system tasks" + echo + echo " ex: ${COLCYAN}${0} -t -u ${COLRESET}" + echo + exit 0 } @@ -400,15 +400,16 @@ HAS_SUDO=1 for cmd in apt-get sudo curl tmux git wget tar gzip grep cargo pgrep tput find; do # in Alpine, `find` might be linked to `busybox` and won't work if [ "${cmd}" == "find" ] && [ -L "${cmd}" ]; then - local rp="$(readlink "$(command -v "${cmd}" || echo "NOTLINK")")" + rp= + rp="$(readlink "$(command -v "${cmd}" || echo "NOTLINK")")" if [ "${rp}" == "/bin/busybox" ]; then echo "${COLRED}ERROR${COLRESET} Busybox 'find' is not supported. Please install 'findutils' or similar." 
exit 1 fi fi - command -v "${cmd}" >/dev/null 2>&1 || { - case "${cmd}" in + command -v "${cmd}" >/dev/null 2>&1 || { + case "${cmd}" in "apt-get") echo "${COLYELLOW}WARN${COLRESET} 'apt-get' not found; automatic package installation will fail" HAS_APT=0 @@ -419,58 +420,58 @@ for cmd in apt-get sudo curl tmux git wget tar gzip grep cargo pgrep tput find; HAS_SUDO=0 continue ;; - "cargo") - install_cargo - ;; - "pgrep") - package="procps" - ;; - *) - package="${cmd}" - ;; - esac + "cargo") + install_cargo + ;; + "pgrep") + package="procps" + ;; + *) + package="${cmd}" + ;; + esac if [[ ${HAS_APT} = 0 ]] || [[ ${HAS_SUDO} = 0 ]]; then echo "${COLRED}Error${COLRESET} Missing command '${cmd}'" exit 1 fi - (sudo apt-get update && sudo apt-get install "${package}") || { - echo "${COLRED}Error${COLRESET} installing $package" - exit 1 - } - } + (sudo apt-get update && sudo apt-get install "${package}") || { + echo "${COLRED}Error${COLRESET} installing $package" + exit 1 + } + } done ## parse cmd-line args while [ ${#} -gt 0 ]; do - case ${1} in - --testing) - # only replay 1_000 blocks - TESTING=true - ;; - -t|--terminal) - # update terminal with progress (it's just printf to show in real-time that the replays are running) - TERM_OUT=true - ;; - -n|--network) - # required if not mainnet - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" + case ${1} in + --testing) + # only replay 1_000 blocks + TESTING=true + ;; + -t|--terminal) + # update terminal with progress (it's just printf to show in real-time that the replays are running) + TERM_OUT=true + ;; + -n|--network) + # required if not mainnet + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" exit 1 - fi - NETWORK=${2} - shift - ;; - -b|--branch) - # build from specific branch - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" + fi + NETWORK=${2} + shift + ;; + -b|--branch) + # build from specific branch + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" exit 1 - fi - BRANCH=${2} - shift - ;; + fi + BRANCH=${2} + shift + ;; -c|--chainstate) # use a local chainstate if [ "${2}" == "" ]; then @@ -489,24 +490,24 @@ while [ ${#} -gt 0 ]; do LOG_DIR="${2}" shift ;; - -r|--RESERVED) - # reserve this many cpus for the system (default is 10) - if [ "${2}" == "" ]; then - echo "Missing required value for ${1}" - fi - if ! [[ "$2" =~ ^[0-9]+$ ]]; then - echo "ERROR: arg ($2) is not a number." >&2 - exit 1 - fi - RESERVED=${2} - shift - ;; - -h|--help|--usage) - # show usage/options and exit - usage - ;; - esac - shift + -r|--RESERVED) + # reserve this many cpus for the system (default is 10) + if [ "${2}" == "" ]; then + echo "Missing required value for ${1}" + fi + if ! [[ "$2" =~ ^[0-9]+$ ]]; then + echo "ERROR: arg ($2) is not a number." 
>&2 + exit 1 + fi + RESERVED=${2} + shift + ;; + -h|--help|--usage) + # show usage/options and exit + usage + ;; + esac + shift done From c931bb3af77e298b835f94b8e8c8f851e2b6eb50 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 11 Mar 2025 16:15:29 +0100 Subject: [PATCH 097/238] fix: make tenure downloader use proper logging level depending on the tenure state (debug if unconfirmed, info otherwise), #5871 --- .../download/nakamoto/tenure_downloader.rs | 4 ++++ .../nakamoto/tenure_downloader_set.rs | 21 ++++++++++++------- .../nakamoto/tenure_downloader_unconfirmed.rs | 1 + stackslib/src/net/tests/download/nakamoto.rs | 1 + 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 14e6c20eebe..a2a5a9b2d95 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -149,6 +149,8 @@ pub struct NakamotoTenureDownloader { pub tenure_end_block: Option<NakamotoBlock>, /// Tenure blocks pub tenure_blocks: Option<Vec<NakamotoBlock>>, + /// Whether this tenure is unconfirmed + pub is_tenure_unconfirmed: bool, } impl NakamotoTenureDownloader { @@ -161,6 +163,7 @@ impl NakamotoTenureDownloader { naddr: NeighborAddress, start_signer_keys: RewardSet, end_signer_keys: RewardSet, + is_tenure_unconfirmed: bool, ) -> Self { debug!( "Instantiate downloader to {}-{} for tenure {}: {}-{}", @@ -187,6 +190,7 @@ impl NakamotoTenureDownloader { tenure_start_block: None, tenure_end_block: None, tenure_blocks: None, + is_tenure_unconfirmed, } } diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 5a1990961b8..8b62133641a 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -520,6 +520,7 @@ impl NakamotoTenureDownloaderSet { naddr.clone(), start_reward_set.clone(), end_reward_set.clone(), + false, ); debug!("Request tenure {ch} from neighbor {naddr}"); @@ -671,14 +672,18 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { - info!( - "Downloader for tenure {} is finished", - &downloader.tenure_id_consensus_hash - ); - debug!( - "Downloader for tenure {} finished on {naddr}", - &downloader.tenure_id_consensus_hash, - ); + if downloader.is_tenure_unconfirmed { + debug!( + "Downloader for tenure {} finished on {naddr}", + &downloader.tenure_id_consensus_hash, + ); + } else { + info!( + "Downloader for tenure {} is finished", + &downloader.tenure_id_consensus_hash + ); + } + finished.push(naddr.clone()); finished_tenures.push(CompletedTenure::from(downloader)); continue; diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index 2a330edb78a..579ee3e4949 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -742,6 +742,7 @@ impl NakamotoUnconfirmedTenureDownloader { self.naddr.clone(), confirmed_signer_keys.clone(), unconfirmed_signer_keys.clone(), + true, ); Ok(ntd) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index cc53f22a4f1..987e4f3bacb 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++
b/stackslib/src/net/tests/download/nakamoto.rs @@ -292,6 +292,7 @@ fn test_nakamoto_tenure_downloader() { naddr, reward_set.clone(), reward_set, + false, ); // must be first block From 635cd48fcc56605df47b719d07ebd294f5194a62 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Mar 2025 11:17:04 -0400 Subject: [PATCH 098/238] test: add `large_mempool` integration test --- .github/workflows/bitcoin-tests.yml | 2 + stackslib/src/chainstate/stacks/miner.rs | 3 +- stackslib/src/core/mempool.rs | 1 - testnet/stacks-node/src/tests/mempool.rs | 8 +- testnet/stacks-node/src/tests/mod.rs | 114 ++++-- .../src/tests/nakamoto_integrations.rs | 384 +++++++++++++++++- 6 files changed, 472 insertions(+), 40 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6bd63f11a77..53c49448918 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -123,6 +123,8 @@ jobs: - test-name: tests::epoch_24::verify_auto_unlock_behavior # Disable this flaky test. We don't need continue testing Epoch 2 -> 3 transition - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY + # This test takes a long time to run, and is meant to be run manually + - test-name: tests::nakamoto_integrations::large_mempool steps: ## Setup test environment diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 819de80a333..a30f8483f0b 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2287,7 +2287,7 @@ impl StacksBlockBuilder { } } - mempool.reset_nonce_cache()?; + // TODO: Should we fill in missing nonces here too? mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; @@ -2652,6 +2652,7 @@ impl StacksBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + mempool.reset_nonce_cache()?; let (blocked, tx_events) = match Self::select_and_apply_transactions( &mut epoch_tx, &mut builder, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index cbc40092518..a1eaabdbc61 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1569,7 +1569,6 @@ impl MemPoolDB { SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM address_nonce_ranked ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC - LIMIT 1 "; let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; let mut nonce_rank_iterator = query_stmt_nonce_rank diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index fa831815293..5b8a07b56db 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -26,7 +26,7 @@ use stacks_common::util::secp256k1::*; use super::{ make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, - serialize_sign_standard_single_sig_tx_anchor_mode_version, to_addr, SK_1, SK_2, + sign_standard_single_sig_tx_anchor_mode_version, to_addr, SK_1, SK_2, }; use crate::helium::RunLoop; use crate::Keychain; @@ -506,7 +506,7 @@ fn mempool_setup_chainstate() { 1000, TokenTransferMemo([0; 34]), ); - let tx_bytes = serialize_sign_standard_single_sig_tx_anchor_mode_version( + let tx = sign_standard_single_sig_tx_anchor_mode_version( payload, &contract_sk, 5, @@ -515,8 +515,8 @@ fn mempool_setup_chainstate() { 
TransactionAnchorMode::OnChainOnly, TransactionVersion::Mainnet, ); - let tx = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); let e = chain_state .will_admit_mempool_tx( &NULL_BURN_STATE_DB, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a4546d231b7..c4230ce61f9 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -134,7 +134,7 @@ pub fn insert_new_port(port: u16) -> bool { } #[allow(clippy::too_many_arguments)] -pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( +pub fn sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, payer: &StacksPrivateKey, @@ -144,8 +144,8 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, -) -> Vec { - serialize_sign_tx_anchor_mode_version( +) -> StacksTransaction { + sign_tx_anchor_mode_version( payload, sender, Some(payer), @@ -158,14 +158,14 @@ pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( ) } -pub fn serialize_sign_standard_single_sig_tx( +pub fn sign_standard_single_sig_tx( payload: TransactionPayload, sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32, -) -> Vec { - serialize_sign_standard_single_sig_tx_anchor_mode( +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode( payload, sender, nonce, @@ -175,15 +175,15 @@ pub fn serialize_sign_standard_single_sig_tx( ) } -pub fn serialize_sign_standard_single_sig_tx_anchor_mode( +pub fn sign_standard_single_sig_tx_anchor_mode( payload: TransactionPayload, sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32, anchor_mode: TransactionAnchorMode, -) -> Vec { - serialize_sign_standard_single_sig_tx_anchor_mode_version( +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode_version( payload, sender, nonce, @@ -194,7 +194,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode( ) } -pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( +pub fn sign_standard_single_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, nonce: u64, @@ -202,8 +202,8 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, -) -> Vec { - serialize_sign_tx_anchor_mode_version( +) -> StacksTransaction { + sign_tx_anchor_mode_version( payload, sender, None, @@ -217,7 +217,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( } #[allow(clippy::too_many_arguments)] -pub fn serialize_sign_tx_anchor_mode_version( +pub fn sign_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, payer: Option<&StacksPrivateKey>, @@ -227,7 +227,7 @@ pub fn serialize_sign_tx_anchor_mode_version( chain_id: u32, anchor_mode: TransactionAnchorMode, version: TransactionVersion, -) -> Vec { +) -> StacksTransaction { let mut sender_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) .expect("Failed to create p2pkh spending condition from public key."); @@ -259,12 +259,35 @@ pub fn serialize_sign_tx_anchor_mode_version( tx_signer.sign_sponsor(payer).unwrap(); } + tx_signer.get_tx().unwrap() +} + +#[allow(clippy::too_many_arguments)] +pub fn serialize_sign_tx_anchor_mode_version( + payload: TransactionPayload, + sender: 
&StacksPrivateKey, + payer: Option<&StacksPrivateKey>, + sender_nonce: u64, + payer_nonce: Option, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> Vec { + let tx = sign_tx_anchor_mode_version( + payload, + sender, + payer, + sender_nonce, + payer_nonce, + tx_fee, + chain_id, + anchor_mode, + version, + ); + let mut buf = vec![]; - tx_signer - .get_tx() - .unwrap() - .consensus_serialize(&mut buf) - .unwrap(); + tx.consensus_serialize(&mut buf).unwrap(); buf } @@ -283,7 +306,10 @@ pub fn make_contract_publish_versioned( let payload = TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } pub fn make_contract_publish( @@ -320,14 +346,17 @@ pub fn make_contract_publish_microblock_only_versioned( let payload = TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - serialize_sign_standard_single_sig_tx_anchor_mode( + let tx = sign_standard_single_sig_tx_anchor_mode( payload, sender, nonce, tx_fee, chain_id, TransactionAnchorMode::OffChainOnly, - ) + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } pub fn make_contract_publish_microblock_only( @@ -429,7 +458,10 @@ pub fn make_stacks_transfer( ) -> Vec { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } #[allow(clippy::too_many_arguments)] @@ -445,7 +477,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( ) -> Vec { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_sponsored_sig_tx_anchor_mode_version( + let tx = sign_sponsored_sig_tx_anchor_mode_version( payload, sender, payer, @@ -455,7 +487,10 @@ pub fn make_sponsored_stacks_transfer_on_testnet( chain_id, TransactionAnchorMode::OnChainOnly, TransactionVersion::Testnet, - ) + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } pub fn make_stacks_transfer_mblock_only( @@ -468,14 +503,17 @@ pub fn make_stacks_transfer_mblock_only( ) -> Vec { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx_anchor_mode( + let tx = sign_standard_single_sig_tx_anchor_mode( payload, sender, nonce, tx_fee, chain_id, TransactionAnchorMode::OffChainOnly, - ) + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } pub fn make_poison( @@ -487,12 +525,18 @@ pub fn make_poison( header_2: StacksMicroblockHeader, ) -> Vec { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec { let 
payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) + let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } #[allow(clippy::too_many_arguments)] @@ -516,7 +560,10 @@ pub fn make_contract_call( function_args: function_args.to_vec(), }; - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + let tx = sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } #[allow(clippy::too_many_arguments)] @@ -540,14 +587,17 @@ pub fn make_contract_call_mblock_only( function_args: function_args.to_vec(), }; - serialize_sign_standard_single_sig_tx_anchor_mode( + let tx = sign_standard_single_sig_tx_anchor_mode( payload.into(), sender, nonce, tx_fee, chain_id, TransactionAnchorMode::OffChainOnly, - ) + ); + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes).unwrap(); + tx_bytes } fn make_microblock( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index ebd2bc5c4e5..6d1f12b9269 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::File; +use std::io::Cursor; use std::ops::RangeBounds; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -23,6 +24,7 @@ use std::thread::JoinHandle; use std::time::{Duration, Instant}; use std::{env, thread}; +use chrono::Utc; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; @@ -31,7 +33,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0}; use libsigner::{SignerSession, StackerDBSession}; -use rusqlite::OptionalExtension; +use rusqlite::{params, Connection, OptionalExtension}; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -61,7 +63,7 @@ use stacks::chainstate::stacks::{ TransactionVersion, MAX_BLOCK_LEN, }; use stacks::config::{EventKeyType, InitialBalance}; -use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -11229,3 +11231,381 @@ fn reload_miner_config() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool. It will then unpause block mining and check +/// how long it takes for the miner to mine the first block, and how long it +/// takes to empty the mempool. 
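+// The funding amounts used in this test can be derived bottom-up from the
+// transfer fee and the fan-out, as the comments inside the test body spell
+// out; a minimal sketch of that arithmetic (these helper names are
+// illustrative only, not part of the test):
+//
+//   /// uSTX a leaf account needs in order to send 1 uSTX `sends` times at `fee` each.
+//   fn leaf_balance(sends: u64, fee: u64) -> u64 {
+//       (fee + 1) * sends // (180 + 1) * 50 = 9_050
+//   }
+//   /// uSTX a funder needs in order to hand each of its 25 children `child` uSTX,
+//   /// paying `fee` per funding transfer, with the 26th share as fee headroom.
+//   fn funder_balance(child: u64, fee: u64) -> u64 {
+//       (child + fee) * 26 // (9_050 + 180) * 26 = 239_980
+//   }
+//
+// One level further up, (239_980 + 180) * 26 = 6_244_160, which matches the
+// initial balances configured in the test body below.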
+fn large_mempool() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + + let sender_signer_sk = Secp256k1PrivateKey::random(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends. + // With a fee of 180 uSTX per send, we need each account to end up with + // 181 * 25 * 2 = 9_050 uSTX. + // The 260 accounts in the middle will need to have + // (9050 + 180) * 26 = 239_980 uSTX. + // The 10 initial accounts will need to have + // (239980 + 180) * 26 = 6_244_160 uSTX. + let initial_balance = 6_244_160; + for addr in initial_sender_addrs.iter() { + naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); + } + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + naka_conf.node.working_dir + ); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_proposed_blocks, + .. + } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + // Open a sqlite DB at mempool_db_path and insert a large number of transactions + // into the mempool. 
We will then mine a block and check how long it takes to + // mine the block and how long it takes to empty the mempool. + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let mut conn = Connection::open(&mempool_db_path).unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sql = "INSERT OR REPLACE INTO mempool ( + txid, + origin_address, + origin_nonce, + sponsor_address, + sponsor_nonce, + tx_fee, + length, + consensus_hash, + block_header_hash, + height, + accept_time, + tx) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 239_980, + ); + let length = transfer_tx.len() as u64; + let mut cursor = Cursor::new(transfer_tx.clone()); + let tx = StacksTransaction::consensus_deserialize(&mut cursor).unwrap(); + let txid = tx.txid(); + let args = params![ + txid, + sender_addr.to_string(), + *nonce, + sender_addr.to_string(), + *nonce, + transfer_fee, + length, + tip.consensus_hash, + tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + Utc::now().timestamp(), + transfer_tx + ]; + db_tx.execute(sql, args).unwrap(); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 9_050, + ); + submit_tx(&http_origin, &transfer_tx); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 
0))); + + info!("Pause mining and fill the mempool with the first round of transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the first round of transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_nonce = *nonce; + let transfer_tx = make_stacks_transfer( + sender_sk, + sender_nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + // TODO: Insert these txs directly into the DB instead of using + // this RPC call + submit_tx(&http_origin, &transfer_tx); + *nonce += 1; + } + } + + info!( + "Sending first round of transfers took {:?}", + timer.elapsed() + ); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + + info!("Mining first round of transfers"); + + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(60, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!( + "Mining first block of first round of transfers took {:?}", + timer.elapsed() + ); + + // Wait for the first round of transfers to all be mined + wait_for(1200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of transfers to be mined"); + + info!("Mining first round of transfers took {:?}", timer.elapsed()); + + info!("Pause mining and fill the mempool with the second round of transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the second round of transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_nonce = *nonce; + let transfer_tx = make_stacks_transfer( + sender_sk, + sender_nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + submit_tx(&http_origin, &transfer_tx); + *nonce += 1; + } + } + + info!( + "Sending second round of transfers took {:?}", + timer.elapsed() + ); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + info!("Mining second round of transfers"); + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. 
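+    // The unpause below reuses the snapshot-then-poll idiom from the first
+    // round: snapshot the proposal counter, clear the stall flag, then poll
+    // until the counter grows. A minimal sketch of that idiom as a reusable
+    // helper, assuming the `wait_for` utility used throughout this file (the
+    // helper name is illustrative):
+    //
+    //   fn wait_for_increase(counter: &AtomicU64, secs: u64) -> Result<(), String> {
+    //       let before = counter.load(Ordering::SeqCst);
+    //       wait_for(secs, || Ok(counter.load(Ordering::SeqCst) > before))
+    //   }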
+ wait_for(60, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!( + "Mining first block of second round of transfers took {:?}", + timer.elapsed() + ); + + // Wait for the second round of transfers to all be mined + wait_for(600, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of transfers to be mined"); + + info!( + "Mining second round of transfers took {:?}", + timer.elapsed() + ); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} From ace8fc3dde09c3f937e88df334c18f3ad6793b39 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Tue, 11 Mar 2025 16:21:56 +0100 Subject: [PATCH 099/238] docs: update changelog, #5871 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d950419f07..d1a4cdf5c2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) +- Improve tenure downloader trace verbosity by applying the proper logging level depending on the tenure state ("debug" if unconfirmed, "info" otherwise) ([#5871](https://github.com/stacks-network/stacks-core/issues/5871)) ## [3.1.0.0.7] From 64d85c26544083d1912dce3feaa10ee445895d1e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 7 Mar 2025 13:29:16 -0500 Subject: [PATCH 100/238] chore: cleanup --- stackslib/src/chainstate/stacks/miner.rs | 46 +------------- stackslib/src/core/nonce_cache.rs | 79 +++++++++++++++++++++++- 2 files changed, 77 insertions(+), 48 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index a30f8483f0b..0cd782a2210 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2292,8 +2292,6 @@ impl StacksBlockBuilder { let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let mut considered = HashSet::new(); // txids of all transactions we looked at - let mut mined_origin_nonces: HashMap<StacksAddress, u64> = HashMap::new(); // map addrs of mined transaction origins to the nonces we used - let mut mined_sponsor_nonces: HashMap<StacksAddress, u64> = HashMap::new(); // map addrs of mined transaction sponsors to the nonces we used let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; @@ -2360,40 +2358,6 @@ impl StacksBlockBuilder { )); } - if let Some(nonce) = mined_origin_nonces.get(&txinfo.tx.origin_address()) { - if *nonce >= txinfo.tx.get_origin_nonce() { - return Ok(Some( - TransactionResult::skipped( - &txinfo.tx, - format!( - "Bad origin nonce, tx nonce {} versus {}.", - txinfo.tx.get_origin_nonce(), - *nonce - ), - ) - .convert_to_event(), - )); - } - } - if let Some(sponsor_addr) = txinfo.tx.sponsor_address() { - if let Some(nonce) = mined_sponsor_nonces.get(&sponsor_addr) { - if let Some(sponsor_nonce) = txinfo.tx.get_sponsor_nonce() { - if *nonce >= sponsor_nonce { - return Ok(Some( - TransactionResult::skipped( -
&txinfo.tx, - format!( - "Bad sponsor nonce, tx nonce {} versus {}.", - sponsor_nonce, *nonce - ), - ) - .convert_to_event(), - )); - } - } - } - } - considered.insert(txinfo.tx.txid()); num_considered += 1; @@ -2445,15 +2409,7 @@ impl StacksBlockBuilder { "error" => ?e); } } - mined_origin_nonces.insert( - txinfo.tx.origin_address(), - txinfo.tx.get_origin_nonce(), - ); - if let (Some(sponsor_addr), Some(sponsor_nonce)) = - (txinfo.tx.sponsor_address(), txinfo.tx.get_sponsor_nonce()) - { - mined_sponsor_nonces.insert(sponsor_addr, sponsor_nonce); - } + if soft_limit_reached { // done mining -- our soft limit execution budget is exceeded. // Make the block from the transactions we did manage to get diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs index 80a8e3ffc75..842f7825e72 100644 --- a/stackslib/src/core/nonce_cache.rs +++ b/stackslib/src/core/nonce_cache.rs @@ -101,9 +101,8 @@ impl NonceCache { } } - /// Store the (address, nonce) pair to the `nonces` table. - /// If storage fails, return false. - /// Otherwise return true. + /// Set the nonce for `address` to `value` in the in-memory cache. + /// If this causes an eviction, flush the in-memory cache to the DB. pub fn set(&mut self, address: StacksAddress, value: u64, conn: &mut DBConn) { let evicted = self.cache.insert(address.clone(), value); if evicted.is_some() { @@ -112,6 +111,8 @@ impl NonceCache { } } + /// Flush the in-memory cache to the DB, including `evicted`. + /// Do not return until successful. pub fn flush_with_evicted(&mut self, conn: &mut DBConn, evicted: Option<(StacksAddress, u64)>) { const MAX_BACKOFF: Duration = Duration::from_secs(30); let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200)); @@ -138,6 +139,7 @@ } } + /// Try to flush the in-memory cache to the DB, including `evicted`. pub fn try_flush_with_evicted( &mut self, conn: &mut DBConn, @@ -162,6 +164,7 @@ Ok(()) } + /// Flush the in-memory cache to the DB.
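+    // `flush_with_evicted` above retries with a randomized, bounded backoff so
+    // a briefly locked mempool DB cannot wedge the caller. A minimal sketch of
+    // that retry shape, assuming some fallible `try_flush` closure (the
+    // doubling step is illustrative rather than taken from this patch):
+    //
+    //   let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200));
+    //   while try_flush().is_err() {
+    //       std::thread::sleep(backoff);
+    //       backoff = (backoff * 2).min(MAX_BACKOFF);
+    //   }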
pub fn flush(&mut self, conn: &mut DBConn) { self.flush_with_evicted(conn, None) } @@ -250,4 +253,74 @@ mod tests { db_set_nonce(&conn, &addr, 123).unwrap(); assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 123); } + + #[test] + fn test_nonce_cache_eviction() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(2); // Cache size of 2 + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + let addr3 = + StacksAddress::from_string("ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG").unwrap(); + + let conn = &mut mempool.db; + + // Fill cache to capacity + cache.set(addr1.clone(), 1, conn); + cache.set(addr2.clone(), 2, conn); + + // This should cause addr1 to be evicted + cache.set(addr3.clone(), 3, conn); + + // Verify addr1 was written to DB during eviction + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 1); + } + + #[test] + fn test_nonce_cache_flush() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let mut cache = NonceCache::new(3); + + let addr1 = + StacksAddress::from_string("ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM").unwrap(); + let addr2 = + StacksAddress::from_string("ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5").unwrap(); + + let conn = &mut mempool.db; + + cache.set(addr1.clone(), 5, conn); + cache.set(addr2.clone(), 10, conn); + + // Explicitly flush cache + cache.flush(conn); + + // Verify both entries were written to DB + assert_eq!(db_get_nonce(&conn, &addr1).unwrap().unwrap(), 5); + assert_eq!(db_get_nonce(&conn, &addr2).unwrap().unwrap(), 10); + } + + #[test] + fn test_db_nonce_overwrite() { + let _chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, CHAIN_ID_TESTNET, &chainstate_path).unwrap(); + let conn = &mut mempool.db; + + let addr = StacksAddress::from_string("ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC").unwrap(); + + // Set initial nonce + db_set_nonce(&conn, &addr, 1).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 1); + + // Overwrite with new nonce + db_set_nonce(&conn, &addr, 2).unwrap(); + assert_eq!(db_get_nonce(&conn, &addr).unwrap().unwrap(), 2); + } } From 7542b69ec1b5d17ef51d36ac2456a20a8ca6efb8 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 11 Mar 2025 08:46:39 -0700 Subject: [PATCH 101/238] fix: update comment in test --- testnet/stacks-node/src/event_dispatcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index d8479792cc4..d4c175ae01e 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2220,7 +2220,8 @@ mod test { let pending_payloads = EventDispatcher::get_pending_payloads(&conn).expect("Failed to get pending payloads"); - // Verify that the pending payload is still in the database + // Verify that the pending payload is no longer in the database, + // because this observer 
is no longer registered. assert_eq!( pending_payloads.len(), 0, From 17a5ac356bcf331c2a9ba5d666efb22e9ac24577 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Mar 2025 11:55:50 -0400 Subject: [PATCH 102/238] docs: add instructions for installing nix --- contrib/nix/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/nix/README.md b/contrib/nix/README.md index 7c47c2357cb..72b996e068f 100644 --- a/contrib/nix/README.md +++ b/contrib/nix/README.md @@ -4,6 +4,11 @@ Build `stacks-node` and `stacks-signer` by pointing to the `flake.nix` file in this directory. For instance, from the root directory: `nix build './contrib/nix'`. +## Installing `nix` + +Follow the [official documentation](https://nix.dev/install-nix) or use the +[Determinate Nix Installer](https://github.com/DeterminateSystems/nix-installer). + ## Using `direnv` If using `direnv`, from the root directory of this repository: From b4e0f72e2f7a206fa166a8c9850de47569a3b3cf Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Mar 2025 12:06:16 -0400 Subject: [PATCH 103/238] chore: revert changelog changes due to bad merge --- CHANGELOG.md | 4 ++-- stacks-signer/CHANGELOG.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0c00e1c6ed..3889d73e740 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,10 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). -## [3.1.0.0.7] +## [Unreleased] ### Added" -- Add fee information to transaction log ending with "success" or "skipped", while building a new block +- Add fee information to transaction log ending with "success" or "skipped", while building a new block ### Changed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 340dd47427b..35ae5d07c40 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
-## [3.1.0.0.7.0] +## [Unreleased] ### Changed From c4dc334a4de63e07b310adf49bb976242e3969cf Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Mar 2025 16:36:43 -0400 Subject: [PATCH 104/238] chore: fix clippy warning about `Instant` --- stackslib/src/main.rs | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 80f1f8c07cb..46838f217df 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -37,7 +37,7 @@ use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; -use std::time::Duration; +use std::time::{Duration, Instant}; use std::{env, fs, io, process, thread}; use blockstack_lib::burnchains::bitcoin::{spv, BitcoinNetworkType}; @@ -605,7 +605,7 @@ Given a , obtain a 2100 header hash block inventory (with an empty let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) .expect("Failed to get sortition chain tip"); - let start = time::Instant::now(); + let start = Instant::now(); let header_hashes = { let ic = sort_db.index_conn(); @@ -614,14 +614,11 @@ Given a , obtain a 2100 header hash block inventory (with an empty .unwrap() }; - println!( - "Fetched header hashes in {}", - start.elapsed().as_seconds_f32() - ); - let start = time::Instant::now(); + println!("Fetched header hashes in {}", start.elapsed().as_secs_f32()); + let start = Instant::now(); let block_inv = chain_state.get_blocks_inventory(&header_hashes).unwrap(); - println!("Fetched block inv in {}", start.elapsed().as_seconds_f32()); + println!("Fetched block inv in {}", start.elapsed().as_secs_f32()); println!("{:?}", &block_inv); println!("Done!"); @@ -652,7 +649,7 @@ check if the associated microblocks can be downloaded let chain_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()) .expect("Failed to get sortition chain tip"); - let start = time::Instant::now(); + let start = Instant::now(); let local_peer = LocalPeer::new( 0, 0, @@ -671,12 +668,9 @@ check if the associated microblocks can be downloaded .unwrap() }; - println!( - "Fetched header hashes in {}", - start.elapsed().as_seconds_f32() - ); + println!("Fetched header hashes in {}", start.elapsed().as_secs_f32()); - let start = time::Instant::now(); + let start = Instant::now(); let mut total_load_headers = 0; for (consensus_hash, block_hash_opt) in header_hashes.iter() { @@ -736,7 +730,7 @@ check if the associated microblocks can be downloaded println!( "Checked can_download in {} (headers load took {}ms)", - start.elapsed().as_seconds_f32(), + start.elapsed().as_secs_f32(), total_load_headers ); From 0832f24496cd1391b0286f2a25d07040070fc95b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 11 Mar 2025 17:31:01 -0400 Subject: [PATCH 105/238] chore: additional uses of `time::Instant` --- stackslib/src/cost_estimates/tests/cost_estimators.rs | 2 +- stackslib/src/cost_estimates/tests/fee_scalar.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index 927c0a50d8a..e3b2515e018 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -1,5 +1,6 @@ use std::env; use std::path::PathBuf; +use std::time::Instant; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, StandardPrincipalData}; @@ -11,7 +12,6 @@ use stacks_common::types::chainstate::{ 
}; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::vrf::VRFProof; -use time::Instant; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index 04c1fc27a74..c7f39c29211 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -1,5 +1,6 @@ use std::env; use std::path::PathBuf; +use std::time::Instant; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, StandardPrincipalData}; @@ -11,7 +12,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::vrf::VRFProof; -use time::Instant; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::stacks::db::{StacksEpochReceipt, StacksHeaderInfo}; From 0d655f79f56d0f8a7ab303514daa2824ad9a2dac Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 12 Mar 2025 06:22:36 -0700 Subject: [PATCH 106/238] removing test epoch_25::microblocks_disabled --- testnet/stacks-node/src/tests/epoch_25.rs | 306 ---------------------- 1 file changed, 306 deletions(-) delete mode 100644 testnet/stacks-node/src/tests/epoch_25.rs diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs deleted file mode 100644 index 3864d9c3507..00000000000 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::{env, thread}; - -use clarity::vm::types::PrincipalData; -use stacks::burnchains::{Burnchain, PoxConstants}; -use stacks::config::InitialBalance; -use stacks::core::{self, EpochList, StacksEpochId}; -use stacks_common::consts::STACKS_EPOCH_MAX; -use stacks_common::types::chainstate::StacksPrivateKey; - -use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::wait_for; -use crate::tests::neon_integrations::{ - get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, - test_observer, wait_for_runloop, -}; -use crate::tests::{make_stacks_transfer_mblock_only, to_addr}; -use crate::{neon, BitcoinRegtestController, BurnchainController}; - -#[test] -#[ignore] -fn microblocks_disabled() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let reward_cycle_len = 10; - let prepare_phase_len = 3; - let epoch_2_05 = 1; - let epoch_2_1 = 2; - let v1_unlock_height = epoch_2_1 + 1; - let epoch_2_2 = 3; // two blocks before next prepare phase. 
- let epoch_2_3 = 4; - let epoch_2_4 = 5; - let pox_3_activation_height = epoch_2_4; - let epoch_2_5 = 210; - - let spender_1_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let spender_2_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - - let spender_1_sk = StacksPrivateKey::random(); - let spender_1_addr: PrincipalData = to_addr(&spender_1_sk).into(); - - let spender_2_sk = StacksPrivateKey::random(); - let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - - let mut initial_balances = vec![]; - - initial_balances.push(InitialBalance { - address: spender_1_addr.clone(), - amount: spender_1_bal, - }); - - initial_balances.push(InitialBalance { - address: spender_2_addr.clone(), - amount: spender_2_bal, - }); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - conf.node.mine_microblocks = true; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 1_000; - conf.node.wait_time_for_blocks = 2_000; - conf.miner.wait_for_block_download = false; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - test_observer::spawn(); - test_observer::register_any(&mut conf); - conf.initial_balances.append(&mut initial_balances); - - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; - epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; - epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; - epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; - epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; - epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; - epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; - epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; - epochs[StacksEpochId::Epoch23].end_height = epoch_2_4; - epochs[StacksEpochId::Epoch24].start_height = epoch_2_4; - epochs[StacksEpochId::Epoch24].end_height = epoch_2_5; - epochs[StacksEpochId::Epoch25].start_height = epoch_2_5; - epochs[StacksEpochId::Epoch25].end_height = STACKS_EPOCH_MAX; - epochs.truncate_after(StacksEpochId::Epoch25); - conf.burnchain.epochs = Some(epochs); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 4 * prepare_phase_len / 5, - 5, - 15, - u64::MAX - 2, - u64::MAX - 1, - v1_unlock_height as u32, - epoch_2_2 as u32 + 1, - u32::MAX, - pox_3_activation_height as u32, - ); - burnchain_config.pox_constants = pox_constants; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let runloop_burnchain = burnchain_config; - - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // push us to block 205 - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Ensure we start off with 0 microblocks - assert!(test_observer::get_microblocks().is_empty()); - - let tx = make_stacks_transfer_mblock_only( - &spender_1_sk, - 0, - 500, - conf.burnchain.chain_id, - &spender_2_addr, - 500, - ); - submit_tx(&http_origin, &tx); - - // Wait for a microblock to be assembled - wait_for(60, || Ok(test_observer::get_microblocks().len() == 1)) - .expect("Failed to wait for microblocks to be assembled"); - - // mine Bitcoin blocks up until just before epoch 2.5 - wait_for(120, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_5 - 2 { - return Ok(true); - } - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - Ok(false) - }) - .expect("Failed to wait until just before epoch 2.5"); - - // Verify that the microblock was processed - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - let old_tip_info = get_chain_info(&conf); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.burn_block_height >= old_tip_info.burn_block_height + 3) - }) - .expect("Failed to process block"); - - info!("Test passed processing 2.5"); - - // Submit another microblock only transaction - let tx = make_stacks_transfer_mblock_only( - &spender_1_sk, - 1, - 500, - conf.burnchain.chain_id, - &spender_2_addr, - 500, - ); - submit_tx(&http_origin, &tx); - - // Wait for a microblock to be assembled, but expect none to be assembled - wait_for(30, || Ok(test_observer::get_microblocks().len() > 1)) - .expect_err("Microblocks should not have been assembled"); - - // Mine a block to see if the microblock gets processed - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second transaction should not have been processed! - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce; - - // Now, lets tell the miner to try to mine microblocks, but don't try to confirm them! 
- info!("Setting STACKS_TEST_FORCE_MICROBLOCKS_POST_25"); - env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1"); - - // Wait for a second microblock to be assembled - wait_for(60, || Ok(test_observer::get_microblocks().len() == 2)) - .expect("Failed to wait for microblocks to be assembled"); - - // Mine a block to see if the microblock gets processed - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; - - // second transaction should not have been processed -- even though we should have - // produced microblocks, they should not get accepted to the chain state - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - info!( - "Microblocks assembled: {}", - test_observer::get_microblocks().len() - ); - - // and our miner should have gotten some blocks accepted - assert_eq!( - miner_nonce_after_microblock_assembly, miner_nonce_before_microblock_assembly + 1, - "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}" - ); - - // Now, tell the miner to try to confirm microblocks as well. - // This should test that the block gets rejected by append block - info!("Setting STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25"); - env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1"); - - // Wait for a third microblock to be assembled - wait_for(60, || Ok(test_observer::get_microblocks().len() == 3)) - .expect("Failed to wait for microblocks to be assembled"); - - // Mine a block to see if the microblock gets processed - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; - - // our miner should not have gotten any more blocks accepted - assert_eq!( - miner_nonce_after_microblock_confirmation, - miner_nonce_after_microblock_assembly + 1, - "Mined after started microblock confimration: {miner_nonce_after_microblock_confirmation}", - ); - - // second transaction should not have been processed -- even though we should have - // produced microblocks, they should not get accepted to the chain state - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} From ebadadb5f9f081dbb3c0057c9d367fb60cac3ffd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 12 Mar 2025 11:29:43 -0400 Subject: [PATCH 107/238] test: insert transactions directly into the mempool This is WAY faster than using the RPC. 
---
 .../src/tests/nakamoto_integrations.rs        | 150 ++++++++++++------
 1 file changed, 105 insertions(+), 45 deletions(-)

diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 6d1f12b9269..c0a7d1a822b 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -33,7 +33,7 @@ use http_types::headers::AUTHORIZATION;
 use lazy_static::lazy_static;
 use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0};
 use libsigner::{SignerSession, StackerDBSession};
-use rusqlite::{params, Connection, OptionalExtension};
+use rusqlite::{params, Connection, OptionalExtension, Transaction};
 use stacks::burnchains::{MagicBytes, Txid};
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::{
@@ -11232,6 +11232,58 @@ fn reload_miner_config() {
     run_loop_thread.join().unwrap();
 }
 
+fn insert_tx_in_mempool(
+    db_tx: &Transaction,
+    tx_hex: Vec<u8>,
+    origin_addr: &StacksAddress,
+    origin_nonce: u64,
+    fee: u64,
+    consensus_hash: &ConsensusHash,
+    block_header_hash: &BlockHeaderHash,
+    height: u64,
+) {
+    let sql = "INSERT OR REPLACE INTO mempool (
+        txid,
+        origin_address,
+        origin_nonce,
+        sponsor_address,
+        sponsor_nonce,
+        tx_fee,
+        length,
+        consensus_hash,
+        block_header_hash,
+        height,
+        accept_time,
+        tx)
+        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)";
+    let origin_addr_str = origin_addr.to_string();
+    let length = tx_hex.len() as u64;
+
+    let txid = {
+        let mut cursor = Cursor::new(&tx_hex);
+        StacksTransaction::consensus_deserialize(&mut cursor)
+            .expect("Failed to deserialize transaction")
+            .txid()
+    };
+    let args = params![
+        txid,
+        origin_addr_str,
+        origin_nonce,
+        origin_addr_str,
+        origin_nonce,
+        fee,
+        length,
+        consensus_hash,
+        block_header_hash,
+        height,
+        Utc::now().timestamp(),
+        tx_hex
+    ];
+    db_tx
+        .execute(sql, args)
+        .expect("Failed to insert transaction into mempool");
+}
+
 #[test]
 #[ignore]
 /// This test intends to check the timing of the mempool iteration when there
@@ -11335,28 +11387,13 @@ fn large_mempool() {
 
     next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
-    // Open a sqlite DB at mempool_db_path and insert a large number of transactions
-    // into the mempool. We will then mine a block and check how long it takes to
-    // mine the block and how long it takes to empty the mempool.
     let burnchain = naka_conf.get_burnchain();
     let sortdb = burnchain.open_sortition_db(true).unwrap();
-    let mut conn = Connection::open(&mempool_db_path).unwrap();
-
     let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-    let sql = "INSERT OR REPLACE INTO mempool (
-        txid,
-        origin_address,
-        origin_nonce,
-        sponsor_address,
-        sponsor_nonce,
-        tx_fee,
-        length,
-        consensus_hash,
-        block_header_hash,
-        height,
-        accept_time,
-        tx)
-        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)";
+
+    // Open a sqlite DB at mempool_db_path so that we can quickly add
+    // transactions to the mempool.
+ let mut conn = Connection::open(&mempool_db_path).unwrap(); let db_tx = conn.transaction().unwrap(); info!("Sending the first round of funding"); @@ -11375,25 +11412,16 @@ fn large_mempool() { &recipient_addr.into(), 239_980, ); - let length = transfer_tx.len() as u64; - let mut cursor = Cursor::new(transfer_tx.clone()); - let tx = StacksTransaction::consensus_deserialize(&mut cursor).unwrap(); - let txid = tx.txid(); - let args = params![ - txid, - sender_addr.to_string(), - *nonce, - sender_addr.to_string(), + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, *nonce, transfer_fee, - length, - tip.consensus_hash, - tip.canonical_stacks_tip_hash, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, tip.stacks_block_height, - Utc::now().timestamp(), - transfer_tx - ]; - db_tx.execute(sql, args).unwrap(); + ); *nonce += 1; new_senders.push(recipient_sk); } @@ -11425,10 +11453,12 @@ fn large_mempool() { senders.extend(new_senders.iter().map(|sk| (sk, 0))); info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); let timer = Instant::now(); let mut new_senders = vec![]; for (sender_sk, nonce) in senders.iter_mut() { for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); let recipient_sk = StacksPrivateKey::random(); let recipient_addr = tests::to_addr(&recipient_sk); let transfer_tx = make_stacks_transfer( @@ -11439,11 +11469,21 @@ fn large_mempool() { &recipient_addr.into(), 9_050, ); - submit_tx(&http_origin, &transfer_tx); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); *nonce += 1; new_senders.push(recipient_sk); } } + db_tx.commit().unwrap(); info!("Sending second round of funding took {:?}", timer.elapsed()); @@ -11473,26 +11513,35 @@ fn large_mempool() { // Pause block mining TEST_MINE_STALL.set(true); + let db_tx = conn.transaction().unwrap(); let timer = Instant::now(); // Fill the mempool with the first round of transfers for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { - let sender_nonce = *nonce; + let sender_addr = tests::to_addr(sender_sk); let transfer_tx = make_stacks_transfer( sender_sk, - sender_nonce, + *nonce, transfer_fee, naka_conf.burnchain.chain_id, &recipient, 1, ); - // TODO: Insert these txs directly into the DB instead of using - // this RPC call - submit_tx(&http_origin, &transfer_tx); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); *nonce += 1; } } + db_tx.commit().unwrap(); info!( "Sending first round of transfers took {:?}", @@ -11540,24 +11589,35 @@ fn large_mempool() { // Pause block mining TEST_MINE_STALL.set(true); + let db_tx = conn.transaction().unwrap(); let timer = Instant::now(); // Fill the mempool with the second round of transfers for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { - let sender_nonce = *nonce; + let sender_addr = tests::to_addr(sender_sk); let transfer_tx = make_stacks_transfer( sender_sk, - sender_nonce, + *nonce, transfer_fee, naka_conf.burnchain.chain_id, &recipient, 1, ); - submit_tx(&http_origin, &transfer_tx); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); *nonce += 1; } } + db_tx.commit().unwrap(); info!( "Sending second round of transfers took {:?}", 
From 07ecfbb85ea8adf387c4fa5a131da58c666695de Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Wed, 12 Mar 2025 20:08:47 +0100 Subject: [PATCH 108/238] refactor: make clarity compile with no-default-features --- clarity/src/vm/database/clarity_store.rs | 1 - stacks-common/src/deps_common/bitcoin/util/hash.rs | 1 - stacks-common/src/util/mod.rs | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index 694403513bd..36599d7eea4 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -21,7 +21,6 @@ use stacks_common::util::hash::{hex_bytes, to_hex, Sha512Trunc256Sum}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::contexts::GlobalContext; -#[cfg(feature = "rusqlite")] use crate::vm::database::{ ClarityDatabase, ClarityDeserializable, ClaritySerializable, NULL_BURN_STATE_DB, NULL_HEADER_DB, }; diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index abfce8349f0..3847e314537 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -50,7 +50,6 @@ impl_array_newtype!(Ripemd160Hash, u8, 20); /// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x)) pub struct Hash160([u8; 20]); impl_array_newtype!(Hash160, u8, 20); -impl_byte_array_rusqlite_only!(Hash160); impl Hash160 { /// Convert the Hash160 inner bytes to a non-prefixed hex string diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 46158d2f4f4..c6ba30f3d26 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -19,6 +19,7 @@ pub mod log; #[macro_use] pub mod macros; pub mod chunked_encoding; +#[cfg(feature = "rusqlite")] pub mod db; pub mod hash; pub mod pair; From d3f2ba5e18172aef8346989bf60c0e93b155996e Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 12 Mar 2025 13:08:20 -0700 Subject: [PATCH 109/238] Remove microblock tests from CI --- .github/workflows/bitcoin-tests.yml | 17 - testnet/stacks-node/src/tests/epoch_205.rs | 346 - .../src/tests/neon_integrations.rs | 9539 ++++++----------- 3 files changed, 3292 insertions(+), 6610 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 6bd63f11a77..ba3f6fc356b 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -70,30 +70,13 @@ jobs: - test-name: tests::neon_integrations::lockup_integration - test-name: tests::neon_integrations::most_recent_utxo_integration_test - test-name: tests::neon_integrations::run_with_custom_wallet - - test-name: tests::neon_integrations::test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain - test-name: tests::neon_integrations::test_competing_miners_build_anchor_blocks_on_same_chain_without_rbf - test-name: tests::neon_integrations::test_one_miner_build_anchor_blocks_on_same_chain_without_rbf - test-name: tests::signer::v0::tenure_extend_after_2_bad_commits - test-name: tests::stackerdb::test_stackerdb_event_observer - test-name: tests::stackerdb::test_stackerdb_load_store - # Microblock tests that are no longer needed on every CI run - # (microblocks are unsupported starting in Epoch 2.5) - - test-name: tests::neon_integrations::bad_microblock_pubkey - - test-name: 
tests::neon_integrations::microblock_fork_poison_integration_test
      - test-name: tests::neon_integrations::microblock_integration_test
      - test-name: tests::neon_integrations::microblock_large_tx_integration_test_FLAKY
      - test-name: tests::neon_integrations::microblock_limit_hit_integration_test
      - test-name: tests::neon_integrations::microblock_miner_multiple_attempts
      - test-name: tests::neon_integrations::test_problematic_microblocks_are_not_mined
      - test-name: tests::neon_integrations::test_problematic_microblocks_are_not_relayed_or_stored
      - test-name: tests::neon_integrations::runtime_overflow_unconfirmed_microblocks_integration_test
      - test-name: tests::neon_integrations::size_overflow_unconfirmed_invalid_stream_microblocks_integration_test
      - test-name: tests::neon_integrations::size_overflow_unconfirmed_microblocks_integration_test
      - test-name: tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test
      - test-name: tests::epoch_25::microblocks_disabled
      # Epoch tests are covered by the epoch-tests CI workflow, and don't need to run
      # on every PR (for older epochs)
      - test-name: tests::epoch_205::bigger_microblock_streams_in_2_05
      - test-name: tests::epoch_205::test_cost_limit_switch_version205
      - test-name: tests::epoch_205::test_dynamic_db_method_costs
      - test-name: tests::epoch_205::test_exact_block_costs
diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs
index 7462acd9637..1e602a4a7df 100644
--- a/testnet/stacks-node/src/tests/epoch_205.rs
+++ b/testnet/stacks-node/src/tests/epoch_205.rs
@@ -899,349 +899,3 @@ fn test_cost_limit_switch_version205() {
 
     channel.stop_chains_coordinator();
 }
-
-// mine a stream of microblocks, and verify that microblock streams can get bigger after the epoch
-// transition
-#[test]
-#[ignore]
-fn bigger_microblock_streams_in_2_05() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
-    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
-
-    let (mut conf, miner_account) = neon_integration_test_conf();
-
-    for spender_addr in spender_addrs.iter() {
-        conf.initial_balances.push(InitialBalance {
-            address: spender_addr.clone(),
-            amount: 10492300000,
-        });
-    }
-
-    conf.node.mine_microblocks = true;
-    conf.node.wait_time_for_microblocks = 0;
-    conf.node.microblock_frequency = 0;
-    conf.node.max_microblocks = 65536;
-    conf.burnchain.max_rbf = 1000000;
-
-    conf.miner.first_attempt_time_ms = i64::MAX as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
-    conf.burnchain.epochs = Some(EpochList::new(&[
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch20,
-            start_height: 0,
-            end_height: 206,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750,
-                read_length: 100000000,
-                read_count: 7750,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_0,
-        },
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch2_05,
-            start_height: 206,
-            end_height: 10_002,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750 * 2,
-                read_length: 100000000,
-                read_count: 7750 * 2,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_05,
-        },
-        StacksEpoch {
-            epoch_id: StacksEpochId::Epoch21,
-            start_height: 10_002,
-            end_height: 9223372036854775807,
-            block_limit: ExecutionCost {
-                write_length: 15000000,
-                write_count: 7750 * 2,
-                read_length: 100000000,
-                read_count: 7750 * 2,
-                runtime: 5000000000,
-            },
-            network_epoch: PEER_VERSION_EPOCH_2_05,
-        },
-    ]));
-    conf.burnchain.pox_2_activation = Some(10_003);
-
-    let txs: Vec<Vec<u8>> = spender_sks
-        .iter()
-        .enumerate()
-        .map(|(ix, spender_sk)| {
-            // almost fills a whole block
-            make_contract_publish_microblock_only(
-                spender_sk,
-                0,
-                1049230,
-                conf.burnchain.chain_id,
-                &format!("large-{ix}"),
-                &format!("
-                ;; a single one of these transactions consumes over half the runtime budget
-                (define-constant BUFF_TO_BYTE (list
-                   0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f
-                   0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f
-                   0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f
-                   0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f
-                   0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f
-                   0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f
-                   0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f
-                   0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f
-                   0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f
-                   0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f
-                   0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf
-                   0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf
-                   0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf
-                   0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf
-                   0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef
-                   0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff
-                ))
-                (define-private (crash-me-folder (input (buff 1)) (ctr uint))
-                    (begin
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (unwrap-panic (index-of BUFF_TO_BYTE input))
-                        (+ u1 ctr)
-                    )
-                )
-                (define-public (crash-me (name (string-ascii 128)))
-                    (begin
-                        (fold crash-me-folder BUFF_TO_BYTE u0)
-                        (print name)
-                        (ok u0)
-                    )
-                )
-                (begin
-                    (crash-me \"large-contract-{ix}\"))
-                "
-                )
-            )
-        })
-        .collect();
-
-    test_observer::spawn();
-    test_observer::register_any(&mut conf);
-
-    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
-    btcd_controller
-        .start_bitcoind()
-        .map_err(|_e| ())
-        .expect("Failed starting bitcoind");
-
-    let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
-    let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
-    btc_regtest_controller.bootstrap_chain(201);
-
-    eprintln!("Chain bootstrapped...");
-
-    let mut run_loop = neon::RunLoop::new(conf);
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-    let microblocks_processed = run_loop.get_microblocks_processed_arc();
-
-    let channel = run_loop.get_coordinator_channel().unwrap();
-
-    thread::spawn(move || run_loop.start(None, 0));
-
-    // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed); - - // zeroth block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - let mut ctr = 0; - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - if !wait_for_microblocks(µblocks_processed, 30) { - // we time out if we *can't* mine any more microblocks - break; - } - ctr += 1; - } - microblocks_processed.store(0, Ordering::SeqCst); - - // only one fit - assert_eq!(ctr, 1); - sleep_ms(5_000); - - // confirm it - eprintln!("confirm epoch 2.0 microblock stream"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // send the rest of the transactions - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - ctr += 1; - } - - eprintln!("expect epoch transition"); - - microblocks_processed.store(0, Ordering::SeqCst); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // don't bother waiting for a microblock stream - - eprintln!("expect epoch 2.05 microblock stream"); - - microblocks_processed.store(0, Ordering::SeqCst); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for_microblocks(µblocks_processed, 180); - - microblocks_processed.store(0, Ordering::SeqCst); - - // this test can sometimes miss a mine block event. - sleep_ms(120_000); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut epoch_20_stream_cost = ExecutionCost::ZERO; - let mut epoch_205_stream_cost = ExecutionCost::ZERO; - - // max == largest number of transactions per stream in a given epoch (2.0 or 2.05) - // total == number of transactions across all streams in a given epoch (2.0 or 2.05) - let mut max_big_txs_per_microblock_20 = 0; - let mut total_big_txs_per_microblock_20 = 0; - - let mut max_big_txs_per_microblock_205 = 0; - let mut total_big_txs_per_microblock_205 = 0; - - let mut in_205; - let mut have_confirmed_205_stream; - - for i in 0..10 { - let blocks = test_observer::get_blocks(); - - max_big_txs_per_microblock_20 = 0; - total_big_txs_per_microblock_20 = 0; - - max_big_txs_per_microblock_205 = 0; - total_big_txs_per_microblock_205 = 0; - - in_205 = false; - have_confirmed_205_stream = false; - - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - - let mut num_big_microblock_txs = 0; - let mut total_execution_cost = ExecutionCost::ZERO; - - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().contains("costs-2") { - in_205 = true; - } else if tsc.name.to_string().contains("large") { - num_big_microblock_txs += 1; - if in_205 { - total_big_txs_per_microblock_205 += 1; - } else { - total_big_txs_per_microblock_20 += 1; - } - } - } - let execution_cost = tx.get("execution_cost").unwrap(); - total_execution_cost.read_count += - execution_cost.get("read_count").unwrap().as_i64().unwrap() as u64; - total_execution_cost.read_length += - execution_cost.get("read_length").unwrap().as_i64().unwrap() as u64; - total_execution_cost.write_count += - execution_cost.get("write_count").unwrap().as_i64().unwrap() as u64; - total_execution_cost.write_length += execution_cost - .get("write_length") - .unwrap() - .as_i64() - .unwrap() as u64; - total_execution_cost.runtime += - execution_cost.get("runtime").unwrap().as_i64().unwrap() as u64; - } - if in_205 && num_big_microblock_txs > max_big_txs_per_microblock_205 { - max_big_txs_per_microblock_205 = num_big_microblock_txs; - } - if !in_205 && num_big_microblock_txs > max_big_txs_per_microblock_20 { - max_big_txs_per_microblock_20 = num_big_microblock_txs; - } - - eprintln!("Epoch size: {total_execution_cost:?}"); - - if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { - epoch_20_stream_cost = total_execution_cost; - break; - } - if in_205 && total_execution_cost.exceeds(&ExecutionCost::ZERO) { - have_confirmed_205_stream = true; - epoch_205_stream_cost = total_execution_cost; - break; - } - } - - if have_confirmed_205_stream { - break; - } else { - eprintln!("Trying to confirm a stream again (attempt {})", i + 1); - sleep_ms((i + 2) * 60_000); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - } - } - - eprintln!( - "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" - ); - eprintln!( - "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" - ); - eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); - eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); - - // stuff happened - assert!(epoch_20_stream_cost.runtime > 0); - assert!(epoch_205_stream_cost.runtime > 0); - - // more stuff happened in epoch 2.05 - assert!(epoch_205_stream_cost.read_count > epoch_20_stream_cost.read_count); - assert!(epoch_205_stream_cost.read_length > epoch_20_stream_cost.read_length); - assert!(epoch_205_stream_cost.write_count > epoch_20_stream_cost.write_count); - assert!(epoch_205_stream_cost.write_length > epoch_20_stream_cost.write_length); - - // but epoch 2.05 was *cheaper* in terms of CPU - assert!(epoch_205_stream_cost.runtime < epoch_20_stream_cost.runtime); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 68b8474efb3..5111393dd8c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -1574,76 +1574,6 @@ fn deep_contract() { test_observer::clear(); } -#[test] -#[ignore] -fn bad_microblock_pubkey() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let (mut conf, _miner_account) = neon_integration_test_conf(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); 
-    btcd_controller
-        .start_bitcoind()
-        .expect("Failed starting bitcoind");
-
-    let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path());
-
-    let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain(
-        conf.clone(),
-        None,
-        Some(burnchain_config.clone()),
-        None,
-    );
-
-    btc_regtest_controller.bootstrap_chain(201);
-
-    eprintln!("Chain bootstrapped...");
-
-    let mut run_loop = neon::RunLoop::new(conf.clone());
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-    let channel = run_loop.get_coordinator_channel().unwrap();
-
-    thread::spawn(move || run_loop.start(Some(burnchain_config), 0));
-
-    // give the run loop some time to start up!
-    wait_for_runloop(&blocks_processed);
-
-    // first block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // fault injection
-    env::set_var(
-        "STACKS_MICROBLOCK_PUBKEY_HASH",
-        "0000000000000000000000000000000000000000",
-    );
-    for _i in 0..10 {
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    }
-    env::set_var("STACKS_MICROBLOCK_PUBKEY_HASH", "");
-
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    let blocks = test_observer::get_blocks();
-    assert!(blocks.len() >= 5);
-    assert!(blocks.len() <= 6);
-
-    channel.stop_chains_coordinator();
-    test_observer::clear();
-}
-
 #[test]
 #[ignore]
 fn liquid_ustx_integration() {
@@ -3416,41 +3346,51 @@ fn make_signed_microblock(
 
 #[test]
 #[ignore]
-fn microblock_fork_poison_integration_test() {
+fn filter_low_fee_tx_integration_test() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
 
-    let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
-    let spender_addr: PrincipalData = to_addr(&spender_sk).into();
-    let second_spender_sk = StacksPrivateKey::from_hex(SK_2).unwrap();
-    let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into();
+    let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
+    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
 
     let (mut conf, _) = neon_integration_test_conf();
+    for spender_addr in spender_addrs.iter() {
+        conf.initial_balances.push(InitialBalance {
+            address: spender_addr.clone(),
+            amount: 1049230,
+        });
+    }
 
-    conf.initial_balances.push(InitialBalance {
-        address: spender_addr.clone(),
-        amount: 100300,
-    });
-    conf.initial_balances.push(InitialBalance {
-        address: second_spender_addr.clone(),
-        amount: 10000,
-    });
-
-    // we'll manually post a forked stream to the node
-    conf.node.mine_microblocks = false;
-    conf.burnchain.max_rbf = 1000000;
-    conf.node.wait_time_for_microblocks = 0;
-    conf.node.microblock_frequency = 1_000;
-    conf.miner.first_attempt_time_ms = 2_000;
-    conf.miner.subsequent_attempt_time_ms = 5_000;
-    conf.node.wait_time_for_blocks = 1_000;
-
-    conf.miner.first_attempt_time_ms = i64::MAX as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
-
-    test_observer::spawn();
-    test_observer::register_any(&mut
conf); + if ix < 5 { + // low-fee + make_stacks_transfer( + spender_sk, + 0, + 1000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + } else { + // high-fee + make_stacks_transfer( + spender_sk, + 0, + 2000 + (ix as u64), + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ) + } + }) + .collect(); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3464,10 +3404,8 @@ fn microblock_fork_poison_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - let miner_status = run_loop.get_miner_status(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -3484,219 +3422,169 @@ fn microblock_fork_poison_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - // turn off the miner for now, so we can ensure both of these get accepted and preprocessed - // before we try and mine an anchor block that confirms them - eprintln!("Disable miner"); - signal_mining_blocked(miner_status.clone()); - sleep_ms(10_000); + for tx in txs.iter() { + submit_tx(&http_origin, tx); + } - // our first spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + // mine a couple more blocks + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // our second spender - let account = get_account(&http_origin, &second_spender_addr); - assert_eq!(account.balance, 10000); - assert_eq!(account.nonce, 0); + // First five accounts have a transaction. The miner will consider low fee transactions, + // but rank by estimated fee rate. 
+    // Last five accounts have a transaction
+    for spender_addr in &spender_addrs {
+        let account = get_account(&http_origin, spender_addr);
+        assert_eq!(account.nonce, 1);
+    }
 
     channel.stop_chains_coordinator();
 }
 
 #[test]
 #[ignore]
 fn filter_long_runtime_tx_integration_test() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
 
     let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect();
     let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
 
     let (mut conf, _) = neon_integration_test_conf();
     for spender_addr in spender_addrs.iter() {
         conf.initial_balances.push(InitialBalance {
             address: spender_addr.clone(),
             amount: 1049230,
         });
     }
 
     // ...but none of them will be mined since we allot zero ms to do so
     conf.miner.first_attempt_time_ms = 0;
     conf.miner.subsequent_attempt_time_ms = 0;
 
     let txs: Vec<_> = spender_sks
         .iter()
         .enumerate()
         .map(|(ix, spender_sk)| {
             let recipient = StacksAddress::from_string(ADDR_4).unwrap();
             make_stacks_transfer(
                 spender_sk,
                 0,
                 1000 + (ix as u64),
                 conf.burnchain.chain_id,
                 &recipient.into(),
                 1000,
             )
         })
         .collect();
 
     let mut btcd_controller = BitcoinCoreController::new(conf.clone());
     btcd_controller
         .start_bitcoind()
         .expect("Failed starting bitcoind");
 
     let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
 
     btc_regtest_controller.bootstrap_chain(201);
 
     eprintln!("Chain bootstrapped...");
 
     let mut run_loop = neon::RunLoop::new(conf);
     let blocks_processed = run_loop.get_blocks_processed_arc();
 
     let channel = run_loop.get_coordinator_channel().unwrap();
 
     thread::spawn(move || run_loop.start(None, 0));
 
     // give the run loop some time to start up!
     wait_for_runloop(&blocks_processed);
 
     // first block wakes up the run loop
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
     // first block will hold our VRF registration
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
     // second block will be the first mined Stacks block
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
     for tx in txs.iter() {
         submit_tx(&http_origin, tx);
     }
 
     // mine a couple more blocks
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
 
     // no transactions mined
     for spender_addr in &spender_addrs {
         let account = get_account(&http_origin, &spender_addr);
         assert_eq!(account.nonce, 0);
     }
 
     channel.stop_chains_coordinator();
 }
 
 #[test]
 #[ignore]
 fn miner_submit_twice() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
 
     let spender_sk = StacksPrivateKey::random();
     let spender_addr: PrincipalData = to_addr(&spender_sk).into();
     let contract_content = "
      (define-public (foo (a int))
        (ok (* 2 (+ a 1))))
      (define-private (bar)
        (foo 56))
     ";
 
     let (mut conf, _) = neon_integration_test_conf();
     conf.initial_balances.push(InitialBalance {
         address: spender_addr.clone(),
         amount: 1049230,
     });
 
     conf.node.mine_microblocks = false;
     // one should be mined in first attempt, and two should be in second attempt
     conf.miner.first_attempt_time_ms = 20;
     conf.miner.subsequent_attempt_time_ms = 30_000;
 
     let tx_1 = make_contract_publish(
         &spender_sk,
         0,
         50_000,
         conf.burnchain.chain_id,
         "first-contract",
         contract_content,
     );
     let tx_2 = make_contract_publish(
         &spender_sk,
         1,
         50_000,
         conf.burnchain.chain_id,
         "second-contract",
         contract_content,
     );
 
     // note: this test depends on timing of how long it takes to assemble a block,
     // but it won't flake if the miner behaves correctly: a correct miner should
     // always be able to mine both transactions by the end of this test. an incorrect
     // miner may sometimes pass this test though, if they can successfully mine a
     // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a
     // 0-transaction block in that time (because this would trigger a re-attempt, which
     // is exactly what this test is measuring).
     //
     // The "fixed" behavior is the corner case where a miner did a "first attempt", which
     // included 1 or more transaction, but they could have made a second attempt with
     // more transactions.
- let second_microblock = - make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - eprintln!( - "Created second conflicting microblock: {}: {second_microblock:?}", - &second_microblock.block_hash() - ); - (first_microblock, second_microblock) - }; + btc_regtest_controller.bootstrap_chain(201); - let mut microblock_bytes = vec![]; - first_microblock - .consensus_serialize(&mut microblock_bytes) - .unwrap(); + eprintln!("Chain bootstrapped..."); - // post the first microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - assert_eq!(res, format!("{}", &first_microblock.block_hash())); + let channel = run_loop.get_coordinator_channel().unwrap(); - let mut second_microblock_bytes = vec![]; - second_microblock - .consensus_serialize(&mut second_microblock_bytes) - .unwrap(); + thread::spawn(move || run_loop.start(None, 0)); - // post the second microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(second_microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - assert_eq!(res, format!("{}", &second_microblock.block_hash())); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Wait 10s and re-enable miner"); - sleep_ms(10_000); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // resume mining - eprintln!("Enable miner"); - signal_mining_ready(miner_status); - sleep_ms(10_000); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Attempt to mine poison-microblock"); - let mut found = false; - for _i in 0..10 { - if found { - break; - } - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + for tx in txs.iter() { + submit_tx(&http_origin, tx); + } - if let TransactionPayload::PoisonMicroblock(..) 
= &parsed.payload { - found = true; - break; - } - } - } - } + // mine a couple more blocks + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - assert!( - found, - "Did not find poison microblock tx in any mined block" - ); + // no transactions mined + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + } - test_observer::clear(); channel.stop_chains_coordinator(); } #[test] #[ignore] -fn microblock_integration_test() { +fn miner_submit_twice() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let second_spender_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); - let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); - - let (mut conf, miner_account) = neon_integration_test_conf(); + let contract_content = " + (define-public (foo (a int)) + (ok (* 2 (+ a 1)))) + (define-private (bar) + (foo 56)) + "; - conf.miner.wait_for_block_download = false; + let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), - amount: 100300, - }); - conf.initial_balances.push(InitialBalance { - address: second_spender_addr.clone(), - amount: 10000, + amount: 1049230, }); - conf.node.mine_microblocks = true; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; + conf.node.mine_microblocks = false; + // one should be mined in first attempt, and two should be in second attempt + conf.miner.first_attempt_time_ms = 20; + conf.miner.subsequent_attempt_time_ms = 30_000; - test_observer::spawn(); - test_observer::register_any(&mut conf); + let tx_1 = make_contract_publish( + &spender_sk, + 0, + 50_000, + conf.burnchain.chain_id, + "first-contract", + contract_content, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 50_000, + conf.burnchain.chain_id, + "second-contract", + contract_content, + ); + + // note: this test depends on timing of how long it takes to assemble a block, + // but it won't flake if the miner behaves correctly: a correct miner should + // always be able to mine both transactions by the end of this test. an incorrect + // miner may sometimes pass this test though, if they can successfully mine a + // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a + // 0-transaction block in that time (because this would trigger a re-attempt, which + // is exactly what this test is measuring). + // + // The "fixed" behavior is the corner case where a miner did a "first attempt", which + // included 1 or more transaction, but they could have made a second attempt with + // more transactions. 
let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -3710,9 +3598,8 @@ fn microblock_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -3730,510 +3617,208 @@ fn microblock_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - info!("Miner account: {miner_account}"); - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 1); + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); + + // mine a couple more blocks + // waiting enough time between them that a second attempt could be made. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + thread::sleep(Duration::from_secs(15)); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // and our first spender + // 1 transaction mined let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + assert_eq!(account.nonce, 2); - // and our second spender - let account = get_account(&http_origin, &second_spender_addr); - assert_eq!(account.balance, 10000); - assert_eq!(account.nonce, 0); + channel.stop_chains_coordinator(); +} - // okay, let's push a transaction that is marked microblock only! - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer_mblock_only( - &spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - submit_tx(&http_origin, &tx); +#[test] +#[ignore] +fn size_check_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - info!("Try to mine a microblock-only tx"); + let mut giant_contract = "(define-public (f) (ok 1))".to_string(); + for _i in 0..(1024 * 1024 + 500) { + giant_contract.push(' '); + } - // now let's mine a couple blocks, and then check the sender's nonce. - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); + let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. - info!("Wait for second block"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); + let (mut conf, miner_account) = neon_integration_test_conf(); - // I guess let's push another block for good measure? - info!("Wait for third block"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); + // make a bunch of txs that will only fit one per block. 
+ let txs: Vec<_> = spender_sks + .iter() + .enumerate() + .map(|(ix, spender_sk)| { + if ix % 2 == 0 { + make_contract_publish( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ) + } else { + let tx = make_contract_publish_microblock_only( + spender_sk, + 0, + 1049230, + conf.burnchain.chain_id, + "large-0", + &giant_contract, + ); + let parsed_tx = StacksTransaction::consensus_deserialize(&mut &tx[..]).unwrap(); + debug!("Mine transaction {} in a microblock", &parsed_tx.txid()); + tx + } + }) + .collect(); - info!("Test microblock"); + for spender_addr in spender_addrs.iter() { + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 1049230, + }); + } - // microblock must have bumped our nonce - // and our spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 1); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 5000; + conf.node.microblock_frequency = 5000; + conf.miner.microblock_attempt_time_ms = 120_000; - // push another two transactions that are marked microblock only - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &spender_sk, - 1, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - let unconfirmed_tx = - StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); - let second_unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &second_spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1500, - ); - let second_unconfirmed_tx = - StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - // TODO (hack) instantiate the sortdb in the burnchain - let _ = btc_regtest_controller.sortdb_mut(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // put each into a microblock - let (first_microblock, second_microblock) = { - let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); - let tip_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); - let privk = - find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024) - .unwrap(); - let (mut chainstate, _) = StacksChainState::open( - false, - CHAIN_ID_TESTNET, - &conf.get_chainstate_path_str(), - None, - ) - .unwrap(); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - chainstate - .reload_unconfirmed_state( - &btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(), - tip_hash, - ) - .unwrap(); - let iconn = btc_regtest_controller - .sortdb_ref() - .index_handle_at_block(&chainstate, &tip_hash) - .unwrap(); - let first_microblock = make_microblock( - &privk, - &mut chainstate, - &iconn, - consensus_hash, - stacks_block, - vec![unconfirmed_tx], - ); + btc_regtest_controller.bootstrap_chain(201); - eprintln!( - "Created first microblock: {}: {first_microblock:?}", - &first_microblock.block_hash() - ); - /* - let second_microblock = - make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); - */ - let second_microblock = make_signed_microblock( - &privk, - 
vec![second_unconfirmed_tx], - first_microblock.block_hash(), - 1, - ); - eprintln!( - "Created second microblock: {}: {second_microblock:?}", - &second_microblock.block_hash() - ); - (first_microblock, second_microblock) - }; + eprintln!("Chain bootstrapped..."); - let mut microblock_bytes = vec![]; - first_microblock - .consensus_serialize(&mut microblock_bytes) - .unwrap(); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - // post the first microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + let channel = run_loop.get_coordinator_channel().unwrap(); - assert_eq!(res, format!("{}", &first_microblock.block_hash())); + thread::spawn(move || run_loop.start(None, 0)); - eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 98300); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut second_microblock_bytes = vec![]; - second_microblock - .consensus_serialize(&mut second_microblock_bytes) - .unwrap(); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // post the second microblock - let path = format!("{http_origin}/v2/microblocks"); - let res: String = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(second_microblock_bytes.clone()) - .send() - .unwrap() - .json() - .unwrap(); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - assert_eq!(res, format!("{}", &second_microblock.block_hash())); + // let's query the miner's account nonce: + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.nonce, 1); + assert_eq!(account.balance, 0); + // and our potential spenders: - sleep_ms(5_000); + for spender_addr in spender_addrs.iter() { + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 1049230); + } - let mut iter_count = 0; - let tip_info = loop { - let tip_info = get_chain_info(&conf); - eprintln!("{:#?}", tip_info); - match tip_info.unanchored_tip { - None => { - iter_count += 1; - assert!( - iter_count < 10, - "Hit retry count while waiting for net module to process pushed microblock" - ); - sleep_ms(5_000); - continue; - } - Some(_tip) => break tip_info, - } - }; - - assert!(tip_info.stacks_tip_height >= 3); - let stacks_tip = tip_info.stacks_tip; - let stacks_tip_consensus_hash = tip_info.stacks_tip_consensus_hash; - let stacks_id_tip = - StacksBlockHeader::make_index_block_hash(&stacks_tip_consensus_hash, &stacks_tip); - - // todo - pipe in the PoxSyncWatchdog to the RunLoop struct to avoid flakiness here - // wait at least two p2p refreshes so it can produce the microblock - for i in 0..30 { - info!( - "wait {} more seconds for microblock miner to find our transaction...", - 30 - i - ); - sleep_ms(1000); - } - - // check event observer for new microblock event (expect at least 2) - let mut microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 2); - - // 
this microblock should correspond to `second_microblock` - let microblock = microblock_events.pop().unwrap(); - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let tx_sequence = transactions[0] - .get("microblock_sequence") - .unwrap() - .as_u64() - .unwrap(); - assert_eq!(tx_sequence, 1); - let microblock_hash = transactions[0] - .get("microblock_hash") - .unwrap() - .as_str() - .unwrap(); - assert_eq!( - microblock_hash[2..], - format!("{}", second_microblock.header.block_hash()) - ); - let microblock_associated_hash = microblock - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap(); - let index_block_hash_bytes = hex_bytes(&microblock_associated_hash[2..]).unwrap(); - assert_eq!( - StacksBlockId::from_vec(&index_block_hash_bytes), - Some(stacks_id_tip) - ); - // make sure we have stats for the burn block - let _burn_block_hash = microblock.get("burn_block_hash").unwrap().as_str().unwrap(); - let _burn_block_height = microblock - .get("burn_block_height") - .unwrap() - .as_u64() - .unwrap(); - let _burn_block_timestamp = microblock - .get("burn_block_timestamp") - .unwrap() - .as_u64() - .unwrap(); - - // this microblock should correspond to the first microblock that was posted - let microblock = microblock_events.pop().unwrap(); - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let tx_sequence = transactions[0] - .get("microblock_sequence") - .unwrap() - .as_u64() - .unwrap(); - assert_eq!(tx_sequence, 0); - - // check mempool tx events - let memtx_events = test_observer::get_memtxs(); - assert_eq!(memtx_events.len(), 1); - assert_eq!(&memtx_events[0], &format!("0x{}", &bytes_to_hex(&tx))); - - // let's make sure the returned blocks all point at each other.
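[Editorial note] The loop that follows performs this parent/child check inline; as a sketch, the same walk could be factored into a helper (assuming, as above, observer blocks arrive oldest-first as `serde_json::Value`s carrying `index_block_hash` and `parent_index_block_hash`):

// Assert that each observed block names the previously observed block as its
// parent, i.e. the observer saw one contiguous chain.
fn assert_blocks_are_linked(blocks: &[serde_json::Value]) {
    let mut prior: Option<String> = None;
    for block in blocks {
        let parent = block["parent_index_block_hash"].as_str().unwrap().to_string();
        let mine = block["index_block_hash"].as_str().unwrap().to_string();
        if let Some(prev) = &prior {
            assert_eq!(&parent, prev);
        }
        prior = Some(mine);
    }
}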
- let blocks_observed = test_observer::get_blocks(); - // we at least mined 5 blocks - assert!( - blocks_observed.len() >= 3, - "Blocks observed {} should be >= 3", - blocks_observed.len() - ); - assert_eq!(blocks_observed.len() as u64, tip_info.stacks_tip_height + 1); - - let burn_blocks_observed = test_observer::get_burn_blocks(); - let burn_blocks_with_burns: Vec<_> = burn_blocks_observed - .into_iter() - .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) - .collect(); - assert!( - burn_blocks_with_burns.len() >= 3, - "Burn block sortitions {} should be >= 3", - burn_blocks_with_burns.len() - ); - for burn_block in burn_blocks_with_burns { - eprintln!("{burn_block}"); - } - - let mut prior = None; - for block in blocks_observed.iter() { - let parent_index_hash = block - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap() - .to_string(); - let my_index_hash = block - .get("index_block_hash") - .unwrap() - .as_str() - .unwrap() - .to_string(); - if let Some(ref previous_index_hash) = prior { - assert_eq!(&parent_index_hash, previous_index_hash); - } - - // make sure we have a burn_block_hash, burn_block_height and miner_txid - - let _burn_block_hash = block.get("burn_block_hash").unwrap().as_str().unwrap(); - - let _burn_block_height = block.get("burn_block_height").unwrap().as_u64().unwrap(); - - let _miner_txid = block.get("miner_txid").unwrap().as_str().unwrap(); - - // make sure we have stats for the previous burn block - let _parent_burn_block_hash = block - .get("parent_burn_block_hash") - .unwrap() - .as_str() - .unwrap(); - - let _parent_burn_block_height = block - .get("parent_burn_block_height") - .unwrap() - .as_u64() - .unwrap(); - - let _parent_burn_block_timestamp = block - .get("parent_burn_block_timestamp") - .unwrap() - .as_u64() - .unwrap(); - - prior = Some(my_index_hash); + for tx in txs.iter() { + // okay, let's push a bunch of transactions that can only fit one per block! + submit_tx(&http_origin, tx); } - // we can query unconfirmed state from the microblock we announced - let path = format!( - "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", - &tip_info.unanchored_tip.unwrap() - ); - - eprintln!("{path:?}"); + let mut micro_block_txs = 0; + let mut anchor_block_txs = 0; - let mut iter_count = 0; - let res = loop { - let http_resp = client.get(&path).send().unwrap(); + for i in 0..100 { + // now let's mine a couple blocks, and then check the sender's nonce. + // at the end of mining three blocks, there should be _at least one_ transaction from the microblock + // only set that got mined (since the block before this one was empty, a microblock can + // be added), + // and a number of transactions from equal to the number anchor blocks will get mined. + // + // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // this one will contain the sortition from above anchor block, + // which *should* have also confirmed the microblock. + sleep_ms(10_000 * i); - info!("{:?}", http_resp); + micro_block_txs = 0; + anchor_block_txs = 0; - match http_resp.json::<AccountEntryResponse>() { - Ok(x) => break x, - Err(e) => { - warn!("Failed to query {path}; will try again. 
Err = {e:?}"); - iter_count += 1; - assert!(iter_count < 10, "Retry limit reached querying account"); - sleep_ms(1000); - continue; + // let's figure out how many micro-only and anchor-only txs got accepted + // by examining our account nonces: + for (ix, spender_addr) in spender_addrs.iter().enumerate() { + let res = get_account(&http_origin, &spender_addr); + if res.nonce == 1 { + if ix % 2 == 0 { + anchor_block_txs += 1; + } else { + micro_block_txs += 1; + } + } else if res.nonce != 0 { + panic!("Spender address nonce incremented past 1"); } - }; - }; - - info!("Account Response = {res:#?}"); - assert_eq!(res.nonce, 2); - assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); - - // limited by chaining - for next_nonce in 2..5 { - // verify that the microblock miner can automatically pick up transactions - debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); - let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( - &spender_sk, - next_nonce, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(unconfirmed_tx_bytes.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]) - .unwrap() - .txid() - .to_string() - ); - eprintln!("Sent {res}"); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - // wait at least two p2p refreshes - // so it can produce the microblock - for i in 0..30 { - debug!( - "wait {} more seconds for microblock miner to find our transaction...", - 30 - i - ); - sleep_ms(1000); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - // we can query _new_ unconfirmed state from the microblock we announced - let path = format!( - "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", - &tip_info.unanchored_tip.unwrap() - ); - - let res_text = client.get(&path).send().unwrap().text().unwrap(); - - eprintln!("text of {path}\n{res_text}"); - - let res = client - .get(&path) - .send() - .unwrap() - .json::() - .unwrap(); - eprintln!("{path:?}"); - eprintln!("{res:#?}"); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); - // advanced! 
- assert_eq!(res.nonce, next_nonce + 1); - assert_eq!( - u128::from_str_radix(&res.balance[2..], 16).unwrap(), - (96300 - 2000 * (next_nonce - 1)) as u128 - ); + if anchor_block_txs >= 2 && micro_block_txs >= 2 { + break; + } } + assert!(anchor_block_txs >= 2); + assert!(micro_block_txs >= 2); + test_observer::clear(); channel.stop_chains_coordinator(); } #[test] #[ignore] -fn filter_low_fee_tx_integration_test() { +fn block_replay_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + let (mut conf, miner_account) = neon_integration_test_conf(); - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 100300, + }); - if ix < 5 { - // low-fee - make_stacks_transfer( - spender_sk, - 0, - 1000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - } else { - // high-fee - make_stacks_transfer( - spender_sk, - 0, - 2000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - } - }) - .collect(); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 5_000; + + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + + test_observer::spawn(); + test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4247,8 +3832,9 @@ fn filter_low_fee_tx_integration_test() { eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -4266,187 +3852,139 @@ fn filter_low_fee_tx_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - for tx in txs.iter() { - submit_tx(&http_origin, tx); - } - - // mine a couple more blocks - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // let's query the miner's account nonce: - // First five accounts have a transaction. The miner will consider low fee transactions, - // but rank by estimated fee rate. 
- // Last five accounts have transaction - for spender_addr in &spender_addrs { - let account = get_account(&http_origin, spender_addr); - assert_eq!(account.nonce, 1); - } + info!("Miner account: {miner_account}"); + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.balance, 0); + assert_eq!(account.nonce, 1); - channel.stop_chains_coordinator(); -} + // and our spender + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, 100300); + assert_eq!(account.nonce, 0); -#[test] -#[ignore] -fn filter_long_runtime_tx_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - - // ...but none of them will be mined since we allot zero ms to do so - conf.miner.first_attempt_time_ms = 0; - conf.miner.subsequent_attempt_time_ms = 0; - - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - make_stacks_transfer( - spender_sk, - 0, - 1000 + (ix as u64), - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ) - }) - .collect(); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); + let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + let tx = make_stacks_transfer( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + &recipient.into(), + 1000, + ); + submit_tx(&http_origin, &tx); - // first block wakes up the run loop next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block will hold our VRF registration next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - for tx in txs.iter() { - submit_tx(&http_origin, tx); - } + // try and push the mined block back at the node lots of times + let (tip_consensus_hash, tip_block) = get_tip_anchored_block(&conf); + let mut tip_block_bytes = vec![]; + tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); - // mine a couple more blocks - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + for i in 0..1024 { + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); + let res_text = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tip_block_bytes.clone()) + .send() + .unwrap() + .text() + .unwrap(); - // no transactions mined - for spender_addr in &spender_addrs { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); + eprintln!("{i}: text of {path}\n{res_text}"); } + test_observer::clear(); channel.stop_chains_coordinator(); } #[test] #[ignore] -fn miner_submit_twice() { +fn cost_voting_integration() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk = StacksPrivateKey::random(); - let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let contract_content = " - (define-public (foo (a int)) - (ok (* 2 (+ a 1)))) - (define-private (bar) - (foo 56)) + // let's make `<` free... + let cost_definer_src = " + (define-read-only (cost-definition-le (size uint)) + { + runtime: u0, write_length: u0, write_count: u0, read_count: u0, read_length: u0 + }) "; - let (mut conf, _) = neon_integration_test_conf(); - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); + // the contract that we'll test the costs of + let caller_src = " + (define-public (execute-2 (a uint)) + (ok (< a a))) + "; - conf.node.mine_microblocks = false; - // one should be mined in first attempt, and two should be in second attempt - conf.miner.first_attempt_time_ms = 20; - conf.miner.subsequent_attempt_time_ms = 30_000; + let power_vote_src = " + (define-public (propose-vote-confirm) + (let + ((proposal-id (unwrap-panic (contract-call? 'ST000000000000000000002AMW42H.cost-voting submit-proposal + 'ST000000000000000000002AMW42H.costs \"cost_le\" + .cost-definer \"cost-definition-le\"))) + (vote-amount (* u9000000000 u1000000))) + (try! (contract-call? 'ST000000000000000000002AMW42H.cost-voting vote-proposal proposal-id vote-amount)) + (try! (contract-call? 
'ST000000000000000000002AMW42H.cost-voting confirm-votes proposal-id)) + (ok proposal-id))) + "; - let tx_1 = make_contract_publish( - &spender_sk, - 0, - 50_000, - conf.burnchain.chain_id, - "first-contract", - contract_content, - ); - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 50_000, - conf.burnchain.chain_id, - "second-contract", - contract_content, - ); + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); - // note: this test depends on timing of how long it takes to assemble a block, - // but it won't flake if the miner behaves correctly: a correct miner should - // always be able to mine both transactions by the end of this test. an incorrect - // miner may sometimes pass this test though, if they can successfully mine a - // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a - // 0-transaction block in that time (because this would trigger a re-attempt, which - // is exactly what this test is measuring). - // - // The "fixed" behavior is the corner case where a miner did a "first attempt", which - // included 1 or more transaction, but they could have made a second attempt with - // more transactions. + let (mut conf, miner_account) = neon_integration_test_conf(); + + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + + test_observer::spawn(); + test_observer::register_any(&mut conf); + + let spender_bal = 10_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + None, + ); let http_origin = format!("http://{}", &conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf); + let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(None, 0)); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); // give the run loop some time to start up! wait_for_runloop(&blocks_processed); @@ -4460,257 +3998,327 @@ fn miner_submit_twice() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - submit_tx(&http_origin, &tx_1); - submit_tx(&http_origin, &tx_2); - - // mine a couple more blocks - // waiting enough time between them that a second attempt could be made. 
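[Editorial note] The attempt-time knobs referenced in the comment above gate how long the miner spends assembling a first versus a subsequent block, and the tests in this diff pin them to extremes to either force or forbid re-attempts. A sketch of the two configurations used throughout, with values copied from this diff:

// miner_submit_twice: a tiny first attempt plus a long second window, so the
// miner has time to re-assemble a block with more transactions.
conf.miner.first_attempt_time_ms = 20;
conf.miner.subsequent_attempt_time_ms = 30_000;

// size/limit tests: effectively unlimited budgets, so a single assembly is
// used and block contents stay deterministic.
conf.miner.first_attempt_time_ms = i64::MAX as u64;
conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;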
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - thread::sleep(Duration::from_secs(15)); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // let's query the miner's account nonce: + let res = get_account(&http_origin, &miner_account); + assert_eq!(res.balance, 0); + assert_eq!(res.nonce, 1); - // 1 transaction mined - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 2); + // and our spender: + let res = get_account(&http_origin, &spender_princ); + assert_eq!(res.balance, spender_bal as u128); + assert_eq!(res.nonce, 0); - channel.stop_chains_coordinator(); -} + let transactions = vec![ + make_contract_publish( + &spender_sk, + 0, + 1000, + conf.burnchain.chain_id, + "cost-definer", + cost_definer_src, + ), + make_contract_publish( + &spender_sk, + 1, + 1000, + conf.burnchain.chain_id, + "caller", + caller_src, + ), + make_contract_publish( + &spender_sk, + 2, + 1000, + conf.burnchain.chain_id, + "voter", + power_vote_src, + ), + ]; -#[test] -#[ignore] -fn size_check_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; + for tx in transactions.into_iter() { + submit_tx(&http_origin, &tx); } - let mut giant_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - giant_contract.push(' '); - } - - let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - // make a bunch of txs that will only fit one per block. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let txs: Vec<_> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - make_contract_publish( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - ) - } else { - let tx = make_contract_publish_microblock_only( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - ); - let parsed_tx = StacksTransaction::consensus_deserialize(&mut &tx[..]).unwrap(); - debug!("Mine transaction {} in a microblock", &parsed_tx.txid()); - tx - } - }) - .collect(); + let vote_tx = make_contract_call( + &spender_sk, + 3, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "voter", + "propose-vote-confirm", + &[], + ); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + let call_le_tx = make_contract_call( + &spender_sk, + 4, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "caller", + "execute-2", + &[Value::UInt(1)], + ); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5000; - conf.node.microblock_frequency = 5000; - conf.miner.microblock_attempt_time_ms = 120_000; + submit_tx(&http_origin, &vote_tx); + 
submit_tx(&http_origin, &call_le_tx); - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let channel = run_loop.get_coordinator_channel().unwrap(); + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - thread::spawn(move || run_loop.start(None, 0)); + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + let mut exec_cost = ExecutionCost::ZERO; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "execute-2" { + exec_cost = + serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); + } else if contract_call.function_name.as_str() == "propose-vote-confirm" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(ok u0)"); + tested = true; + } + } + } + assert!(tested, "Should have found a contract call tx"); - // give the run loop some time to start up! 
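[Editorial note] The `execution_cost` captured above is pulled straight out of the observer's JSON for the `execute-2` call. A hedged sketch of the payload shape being deserialized, with field names taken from the `ExecutionCost` literals elsewhere in this diff (the struct here is illustrative, not the node's own type):

#[derive(serde::Deserialize, Debug)]
struct ExecutionCostJson {
    write_length: u64,
    write_count: u64,
    read_length: u64,
    read_count: u64,
    runtime: u64,
}

fn parse_cost(tx: &serde_json::Value) -> ExecutionCostJson {
    // same call shape as the test: pull "execution_cost" out of the tx event
    serde_json::from_value(tx["execution_cost"].clone()).unwrap()
}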
- wait_for_runloop(&blocks_processed); + // try to confirm the passed vote (this will fail) + let confirm_proposal = make_contract_call( + &spender_sk, + 5, + 1000, + conf.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "cost-voting", + "confirm-miners", + &[Value::UInt(0)], + ); - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + submit_tx(&http_origin, &confirm_proposal); - // first block will hold our VRF registration next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // second block will be the first mined Stacks block + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - // and our potential spenders: - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 1049230); + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "confirm-miners" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(err 13)"); + tested = true; + } + } } + assert!(tested, "Should have found a contract call tx"); - for tx in txs.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); + for _i in 0..58 { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } - let mut micro_block_txs = 0; - let mut anchor_block_txs = 0; + // confirm the passed vote + let confirm_proposal = make_contract_call( + &spender_sk, + 6, + 1000, + conf.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "cost-voting", + "confirm-miners", + &[Value::UInt(0)], + ); - for i in 0..100 { - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _at least one_ transaction from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and a number of transactions from equal to the number anchor blocks will get mined. - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. 
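[Editorial note] This wake-then-confirm rhythm recurs in every microblock test here: one burn block triggers assembly, the next carries the sortition that confirms it. A sketch of the idiom as a helper, assuming the `Arc<AtomicU64>` counter type these tests pass to `next_block_and_wait`:

use std::sync::atomic::AtomicU64;
use std::sync::Arc;

fn mine_and_confirm(
    btc: &mut BitcoinRegtestController,
    blocks_processed: &Arc<AtomicU64>,
) {
    // first burn block wakes the miner so it assembles microblocks + anchor
    next_block_and_wait(btc, blocks_processed);
    // second burn block carries the sortition confirming the anchored block
    next_block_and_wait(btc, blocks_processed);
}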
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. - sleep_ms(10_000 * i); + submit_tx(&http_origin, &confirm_proposal); - micro_block_txs = 0; - anchor_block_txs = 0; + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's figure out how many micro-only and anchor-only txs got accepted - // by examining our account nonces: - for (ix, spender_addr) in spender_addrs.iter().enumerate() { - let res = get_account(&http_origin, &spender_addr); - if res.nonce == 1 { - if ix % 2 == 0 { - anchor_block_txs += 1; - } else { - micro_block_txs += 1; - } - } else if res.nonce != 0 { - panic!("Spender address nonce incremented past 1"); + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "confirm-miners" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); + assert_eq!(parsed.to_string(), "(ok true)"); + tested = true; } - - debug!("Spender {ix},{spender_addr}: {res:?}"); } + } + assert!(tested, "Should have found a contract call tx"); - eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); - - if anchor_block_txs >= 2 && micro_block_txs >= 2 { - break; + let call_le_tx = make_contract_call( + &spender_sk, + 7, + 1000, + conf.burnchain.chain_id, + &spender_addr, + "caller", + "execute-2", + &[Value::UInt(1)], + ); + + submit_tx(&http_origin, &call_le_tx); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + + let mut tested = false; + let mut new_exec_cost = ExecutionCost::max_value(); + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if 
contract_call.function_name.as_str() == "execute-2" { + new_exec_cost = + serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); + tested = true; + } } } + assert!(tested, "Should have found a contract call tx"); - assert!(tested, "Should have found a contract call tx"); + assert!(exec_cost.exceeds(&new_exec_cost)); test_observer::clear(); channel.stop_chains_coordinator(); } -// if a microblock consumes the majority of the block budget, then _only_ a microblock will be -// mined for an epoch. #[test] #[ignore] -fn size_overflow_unconfirmed_microblocks_integration_test() { +fn mining_events_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // stuff a gigantic contract into the anchored block - let mut giant_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - giant_contract.push(' '); - } - - // small-sized contracts for microblocks - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..(1024 * 1024 + 500) { - small_contract.push(' '); - } + let small_contract = "(define-public (f) (ok 1))".to_string(); - let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let addr = to_addr(&spender_sk); - let (mut conf, miner_account) = neon_integration_test_conf(); + let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); + let addr_2 = to_addr(&spender_sk_2); - let txs: Vec<Vec<_>> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - // almost fills a whole block - vec![make_contract_publish( - spender_sk, - 0, - 1100000, - conf.burnchain.chain_id, - "large-0", - &giant_contract, - )] - } else { - let mut ret = vec![]; - for i in 0..25 { - let tx = make_contract_publish_microblock_only( - spender_sk, - i as u64, - 1100000, - conf.burnchain.chain_id, - &format!("small-{i}"), - &small_contract, - ); - ret.push(tx); - } - ret - } - }) - .collect(); + let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); - } + conf.initial_balances.push(InitialBalance { + address: addr.into(), + amount: 10000000, + }); + conf.initial_balances.push(InitialBalance { + address: addr_2.into(), + amount: 10000000, + }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5_000; - conf.node.microblock_frequency = 5_000; - conf.miner.microblock_attempt_time_ms = 120_000; + conf.node.wait_time_for_microblocks = 1000; + conf.node.microblock_frequency = 1000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let tx = make_contract_publish( + &spender_sk, + 0, + 600000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 610000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + let mb_tx = make_contract_publish_microblock_only( + &spender_sk_2, + 0, + 620000, + conf.burnchain.chain_id, + "small", + &small_contract, + ); + test_observer::spawn(); - test_observer::register_any(&mut conf); + test_observer::register( + &mut conf, + &[ + EventKeyType::AnyEvent, + EventKeyType::MinedBlocks, + EventKeyType::MinedMicroblocks, + ], + ); let mut btcd_controller = 
BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4726,7 +4334,6 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -4744,345 +4351,253 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - // and our potential spenders: - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - for tx_batch in txs.iter() { - for tx in tx_batch.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); - } - } - - while wait_for_microblocks(&microblocks_processed, 120) { - info!("Waiting for microblocks to no longer be processed"); - } + submit_tx(&http_origin, &tx); // should succeed + submit_tx(&http_origin, &tx_2); // should fail since it tries to publish contract with same name + submit_tx(&http_origin, &mb_tx); // should be in microblock bc it is microblock only - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _two_ transactions from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and _two_ transactions from the two anchor blocks that got mined (and processed) - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. 
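[Editorial note] `wait_for_microblocks` above polls the shared counter handed out by `get_microblocks_processed_arc`; the tests also reset it with `store(0, Ordering::SeqCst)` further down. A sketch of that polling idiom (an illustration of the pattern, not the helper's actual body):

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

fn wait_for_counter(counter: &Arc<AtomicU64>, timeout_secs: u64) -> bool {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if counter.load(Ordering::SeqCst) > 0 {
            return true; // progress observed before the deadline
        }
        std::thread::sleep(Duration::from_millis(100));
    }
    false // timed out with no progress
}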
- - while wait_for_microblocks(&microblocks_processed, 120) { - info!("Waiting for microblocks to no longer be processed"); - } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(30_000); - - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 4); // genesis block + 3 blocks + // check that the nonces have gone up + let res = get_account(&http_origin, &addr); + assert_eq!(res.nonce, 1); - let mut max_big_txs_per_block = 0; - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_block = 0; - let mut total_big_txs_per_microblock = 0; + let res = get_account(&http_origin, &addr_2); + assert_eq!(res.nonce, 1); - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); + // check mined microblock events + let mined_microblock_events = test_observer::get_mined_microblocks(); + assert!(!mined_microblock_events.is_empty()); - let mut num_big_anchored_txs = 0; - let mut num_big_microblock_txs = 0; + // check tx events in the first microblock + // 1 success: 1 contract publish, 2 error (on chain transactions) + let microblock_tx_events = &mined_microblock_events[0].tx_events; + assert_eq!(microblock_tx_events.len(), 1); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().contains("large-") { - num_big_anchored_txs += 1; - total_big_txs_per_block += 1; - } else if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_per_microblock += 1; + // contract publish + match &microblock_tx_events[0] { + TransactionEvent::Success(TransactionSuccessEvent { + result, + fee, + execution_cost, + .. 
+ }) => { + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + assert_eq!(fee, &620000); + assert_eq!( + execution_cost, + &ExecutionCost { + write_length: 35, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 311000 } - } - - if num_big_anchored_txs > max_big_txs_per_block { - max_big_txs_per_block = num_big_anchored_txs; - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; + ) } + _ => panic!("unexpected event type"), } - eprintln!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" - ); - - assert!(max_big_txs_per_block > 0); - assert!(max_big_txs_per_microblock > 0); - assert!(total_big_txs_per_block > 0); - assert!(total_big_txs_per_microblock > 0); - - // can't have too many - assert!(max_big_txs_per_microblock <= 3); - assert!(max_big_txs_per_block <= 1); - - // NOTE: last-mined blocks aren't counted by the observer - assert!(total_big_txs_per_block <= 2); - assert!(total_big_txs_per_microblock <= 3); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -// mine a stream of microblocks, and verify that the miner won't let us overflow the size -#[test] -#[ignore] -fn size_overflow_unconfirmed_stream_microblocks_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..((1024 * 1024 + 500) / 3) { - small_contract.push(' '); - } - - let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + // check mined block events + let mined_block_events = test_observer::get_mined_blocks(); + assert!(mined_block_events.len() >= 3); - let (mut conf, miner_account) = neon_integration_test_conf(); + // check the tx events in the third mined block + // 2 success: 1 coinbase tx event + 1 contract publish, 1 error (duplicate contract) + let third_block_tx_events = &mined_block_events[2].tx_events; + assert_eq!(third_block_tx_events.len(), 3); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); + // coinbase event + match &third_block_tx_events[0] { + TransactionEvent::Success(TransactionSuccessEvent { txid, result, .. }) => { + assert_eq!( + txid.to_string(), + "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" + ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + } + _ => panic!("unexpected event type"), } - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 1000; - conf.node.microblock_frequency = 1000; - conf.miner.microblock_attempt_time_ms = 120_000; - conf.node.max_microblocks = 65536; - conf.burnchain.max_rbf = 1000000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - let txs: Vec<_> = spender_sks - .iter() - .map(|spender_sk| { - make_contract_publish_microblock_only( - spender_sk, - 0, - 600000, - conf.burnchain.chain_id, - "small", - &small_contract, + // contract publish event + match &third_block_tx_events[1] { + TransactionEvent::Success(TransactionSuccessEvent { + result, + fee, + execution_cost, + .. 
+ }) => { + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); + assert_eq!(fee, &600000); + assert_eq!( + execution_cost, + &ExecutionCost { + write_length: 35, + write_count: 2, + read_length: 1, + read_count: 1, + runtime: 311000 + } ) - }) - .collect(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - let mut ctr = 0; - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - if !wait_for_microblocks(&microblocks_processed, 60) { - // we time out if we *can't* mine any more microblocks - break; - } - ctr += 1; - } - - // should be able to fit 5 transactions in, in 5 microblocks - assert_eq!(ctr, 5); - sleep_ms(5_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - eprintln!("First confirmed microblock stream!"); - - microblocks_processed.store(0, Ordering::SeqCst); - - while ctr < txs.len() { - submit_tx(&http_origin, &txs[ctr]); - ctr += 1; + _ => panic!("unexpected event type"), } - wait_for_microblocks(&microblocks_processed, 60); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - eprintln!("Second confirmed microblock stream!"); - - wait_for_microblocks(&microblocks_processed, 60); - - // confirm it - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // this test can sometimes miss a mine block event. 
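[Editorial note] Given the flakiness acknowledged in the comment above, a retry wrapper is one way these tests could avoid missing a mined-block event; a hypothetical sketch built only from calls already used in this file:

use std::sync::atomic::AtomicU64;
use std::sync::Arc;

// Mine burn blocks until the observer has reported at least `want` Stacks
// blocks, with a small retry budget (hypothetical helper, not in the codebase).
fn mine_until_observed(
    btc: &mut BitcoinRegtestController,
    blocks_processed: &Arc<AtomicU64>,
    want: usize,
) {
    for _ in 0..5 {
        if test_observer::get_blocks().len() >= want {
            return;
        }
        next_block_and_wait(btc, blocks_processed);
    }
    panic!("observer never reported {want} blocks");
}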
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let blocks = test_observer::get_blocks(); - assert!(blocks.len() >= 5, "Should have produced at least 5 blocks"); - - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_microblock = 0; - - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - - let mut num_big_microblock_txs = 0; - - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_per_microblock += 1; - } - } - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; - } - } - - eprintln!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" - ); - - assert_eq!(max_big_txs_per_microblock, 5); - assert!(total_big_txs_per_microblock >= 10); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -// Mine a too-long microblock stream, and verify that the anchored block miner truncates it down to -// the longest prefix of the stream that can be mined. #[test] #[ignore] -fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { +fn block_limit_hit_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // create microblock streams that are too big - env::set_var(core::FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK, "1"); - env::set_var(core::FAULT_DISABLE_MICROBLOCKS_COST_CHECK, "1"); + // 700 invocations + let max_contract_src = format!( + "(define-private (work) (begin {} 1)) + (define-private (times-100) (begin {} 1)) + (define-private (times-200) (begin (times-100) (times-100) 1)) + (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) + (times-500) (times-200)", + (0..10) + .map(|_| format!( + "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::>() + .join(" "), + (0..10) + .map(|_| "(work)".to_string()) + .collect::>() + .join(" "), + ); - let mut small_contract = "(define-public (f) (ok 1))".to_string(); - for _i in 0..((1024 * 1024 + 500) / 8) { - small_contract.push(' '); - } + // 2900 invocations + let oversize_contract_src = format!( + "(define-private (work) (begin {} 1)) + (define-private (times-100) (begin {} 1)) + (define-private (times-200) (begin (times-100) (times-100) 1)) + (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) + (define-private (times-1000) (begin (times-500) (times-500) 1)) + (times-1000) (times-1000) (times-500) (times-200) (times-200)", + (0..10) + .map(|_| format!( + "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::>() + .join(" "), + (0..10) + .map(|_| "(work)".to_string()) + .collect::>() + .join(" "), + ); - let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::random()).collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_sk = StacksPrivateKey::random(); + let addr = to_addr(&spender_sk); + let second_spender_sk = StacksPrivateKey::random(); + let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); + let third_spender_sk = StacksPrivateKey::random(); + let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - let (mut conf, miner_account) = neon_integration_test_conf(); + let (mut conf, _miner_account) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 10492300000, - }); - } + conf.initial_balances.push(InitialBalance { + address: addr.into(), + amount: 10_000_000, + }); + conf.initial_balances.push(InitialBalance { + address: second_spender_addr.clone(), + amount: 10_000_000, + }); + conf.initial_balances.push(InitialBalance { + address: third_spender_addr.clone(), + amount: 10_000_000, + }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 5_000; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 120_000; - conf.node.max_microblocks = 65536; - conf.burnchain.max_rbf = 1000000; - - let txs: Vec> = spender_sks - .iter() - .map(|spender_sk| { - make_contract_publish_microblock_only( - spender_sk, - 0, - 1149230, - conf.burnchain.chain_id, - "small", - &small_contract, - ) - }) - .collect(); - - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 1000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + // included in first block + let tx = make_contract_publish( + &spender_sk, + 0, + 555_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); + // contract limit hit; included in second block + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 555_000, + conf.burnchain.chain_id, + "over-2", + &oversize_contract_src, + ); + // skipped over since contract limit was hit; included in second 
block + let tx_3 = make_contract_publish( + &second_spender_sk, + 0, + 150_000, + conf.burnchain.chain_id, + "max", + &max_contract_src, + ); + // included in first block + let tx_4 = make_stacks_transfer( + &third_spender_sk, + 0, + 180, + conf.burnchain.chain_id, + &PrincipalData::from(addr), + 100, + ); + test_observer::spawn(); test_observer::register_any(&mut conf); @@ -5100,7 +4615,6 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let mut run_loop = neon::RunLoop::new(conf); let blocks_processed = run_loop.get_blocks_processed_arc(); - let microblocks_processed = run_loop.get_microblocks_processed_arc(); let channel = run_loop.get_coordinator_channel().unwrap(); @@ -5118,75 +4632,56 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - for spender_addr in spender_addrs.iter() { - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10492300000); - } - - let mut ctr = 0; - for _i in 0..6 { - submit_tx(&http_origin, &txs[ctr]); - if !wait_for_microblocks(&microblocks_processed, 60) { - break; - } - ctr += 1; - } + // submit all the transactions + let txid_1 = submit_tx(&http_origin, &tx); + let txid_2 = submit_tx(&http_origin, &tx_2); + let txid_3 = submit_tx(&http_origin, &tx_3); + let txid_4 = submit_tx(&http_origin, &tx_4); - // confirm that we were able to use the fault-injection to *mine* 6 microblocks - assert_eq!(ctr, 6); sleep_ms(5_000); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - eprintln!("First confirmed microblock stream!"); - - // confirm it next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 4); // genesis block + 3 blocks + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(20_000); - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_per_microblock = 0; + let res = get_account(&http_origin, &addr); + assert_eq!(res.nonce, 2); - // NOTE: this only counts the number of txs per stream, not in each microblock - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); + let res = get_account(&http_origin, &second_spender_addr); + assert_eq!(res.nonce, 1); - let mut num_big_microblock_txs = 0; + let res = get_account(&http_origin, &third_spender_addr); + assert_eq!(res.nonce, 1); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload {
-                    if tsc.name.to_string().contains("small") {
-                        num_big_microblock_txs += 1;
-                        total_big_txs_per_microblock += 1;
-                    }
-                }
-            }
-            if num_big_microblock_txs > max_big_txs_per_microblock {
-                max_big_txs_per_microblock = num_big_microblock_txs;
-            }
-        }
+    let mined_block_events = test_observer::get_blocks();
+    assert_eq!(mined_block_events.len(), 5);

-    eprintln!(
-        "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}"
-    );
+    let tx_third_block = mined_block_events[3]
+        .get("transactions")
+        .unwrap()
+        .as_array()
+        .unwrap();
+    assert_eq!(tx_third_block.len(), 3);
+    let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap();
+    let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap();
+    assert_eq!(format!("0x{txid_1}"), txid_1_exp);
+    assert_eq!(format!("0x{txid_4}"), txid_4_exp);

-    assert_eq!(max_big_txs_per_microblock, 3);
-    assert!(total_big_txs_per_microblock <= 6);
+    let tx_fourth_block = mined_block_events[4]
+        .get("transactions")
+        .unwrap()
+        .as_array()
+        .unwrap();
+    assert_eq!(tx_fourth_block.len(), 3);
+    let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap();
+    let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap();
+    assert_eq!(format!("0x{txid_2}"), txid_2_exp);
+    assert_eq!(format!("0x{txid_3}"), txid_3_exp);

     test_observer::clear();
     channel.stop_chains_coordinator();
 }

@@ -5194,156 +4689,77 @@
 #[test]
 #[ignore]
-fn runtime_overflow_unconfirmed_microblocks_integration_test() {
+fn block_large_tx_integration_test() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }

-    let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::random()).collect();
-    let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect();
-    let spender_addrs_c32: Vec<StacksAddress> = spender_sks.iter().map(to_addr).collect();
+    let small_contract_src = format!(
+        "(define-public (f) (begin {} (ok 1))) (begin (f))",
+        (0..700)
+            .map(|_| format!(
+                "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))",
+                boot_code_id("cost-voting", false),
+                boot_code_id("costs", false),
+                boot_code_id("costs", false),
+            ))
+            .collect::<Vec<String>>()
+            .join(" ")
+    );
+
+    let oversize_contract_src = format!(
+        "(define-public (f) (begin {} (ok 1))) (begin (f))",
+        (0..3500)
+            .map(|_| format!(
+                "(unwrap! (contract-call?
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::>() + .join(" ") + ); + + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); let (mut conf, miner_account) = neon_integration_test_conf(); + test_observer::spawn(); + test_observer::register_any(&mut conf); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } + conf.initial_balances.push(InitialBalance { + address: spender_addr.into(), + amount: 10000000, + }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 15000; - conf.miner.microblock_attempt_time_ms = 120_000; + conf.node.wait_time_for_microblocks = 30000; + conf.node.microblock_frequency = 1000; + + conf.miner.microblock_attempt_time_ms = i64::MAX as u64; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); - epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; - conf.burnchain.epochs = Some(epochs); - - let txs: Vec> = spender_sks - .iter() - .enumerate() - .map(|(ix, spender_sk)| { - if ix % 2 == 0 { - // almost fills a whole block - vec![make_contract_publish( - spender_sk, - 0, - 1049230, - conf.burnchain.chain_id, - &format!("large-{ix}"), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - 
(define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"large-contract-{}-{ix}\")) - ", - &spender_addrs_c32[ix] - ) - )] - } else { - let mut ret = vec![]; - for i in 0..1 { - let tx = make_contract_publish_microblock_only( - spender_sk, - i as u64, - 210000, - conf.burnchain.chain_id, - &format!("small-{ix}-{i}"), - &format!(" - ;; a single one of these transactions consumes over half the runtime budget - (define-constant BUFF_TO_BYTE (list - 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 0x0f - 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f - 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f - 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f - 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f - 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f - 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f - 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f - 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f - 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f - 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf - 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf - 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf - 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf - 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef - 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff - )) - (define-private (crash-me-folder (input (buff 1)) (ctr uint)) - (begin - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (unwrap-panic (index-of BUFF_TO_BYTE input)) - (+ u1 ctr) - ) - ) - (define-public (crash-me (name (string-ascii 128))) - (begin - (fold crash-me-folder BUFF_TO_BYTE u0) - (print name) - (ok u0) - ) - ) - (begin - (crash-me \"small-contract-{}-{ix}-{i}\")) - ", spender_addrs_c32[ix]) - ); - ret.push(tx); - } - ret - } - }) - .collect(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); + // higher fee for tx means it will get mined first + let tx = make_contract_publish( + &spender_sk, + 0, + 671_000, + conf.burnchain.chain_id, + "small", + &small_contract_src, + ); + let tx_2 = make_contract_publish( + &spender_sk, + 1, + 670_000, + conf.burnchain.chain_id, + "over", + &oversize_contract_src, + ); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -5376,108 +4792,26 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); assert_eq!(account.nonce, 1); assert_eq!(account.balance, 0); - // and our potential spenders: - - for spender_addr in spender_addrs.iter() { - let account = 
get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 1049230); - } - - for tx_batch in txs.iter() { - for tx in tx_batch.iter() { - // okay, let's push a bunch of transactions that can only fit one per block! - submit_tx(&http_origin, tx); - } - } - - debug!("Wait for 1st microblock to be mined"); - sleep_ms(150_000); - - // now let's mine a couple blocks, and then check the sender's nonce. - // at the end of mining three blocks, there should be _two_ transactions from the microblock - // only set that got mined (since the block before this one was empty, a microblock can - // be added), - // and _two_ transactions from the two anchor blocks that got mined (and processed) - // - // this one wakes up our node, so that it'll mine a microblock _and_ an anchor block. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // this one will contain the sortition from above anchor block, - // which *should* have also confirmed the microblock. - - debug!("Wait for 2nd microblock to be mined"); - sleep_ms(150_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - debug!("Wait for 3nd microblock to be mined"); - sleep_ms(150_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), 5); // genesis block + 4 blocks - - let mut max_big_txs_per_block = 0; - let mut max_big_txs_per_microblock = 0; - let mut total_big_txs_in_blocks = 0; - let mut total_big_txs_in_microblocks = 0; - - for block in blocks { - eprintln!("block {block:?}"); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - let mut num_big_anchored_txs = 0; - let mut num_big_microblock_txs = 0; - - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {parsed:?}"); - if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().contains("large-") { - num_big_anchored_txs += 1; - total_big_txs_in_blocks += 1; - } else if tsc.name.to_string().contains("small") { - num_big_microblock_txs += 1; - total_big_txs_in_microblocks += 1; - } - } - } + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 0); + assert_eq!(account.balance, 10000000); - if num_big_anchored_txs > max_big_txs_per_block { - max_big_txs_per_block = num_big_anchored_txs; - } - if num_big_microblock_txs > max_big_txs_per_microblock { - max_big_txs_per_microblock = num_big_microblock_txs; - } - } + let normal_txid = submit_tx(&http_origin, &tx); + let huge_txid = submit_tx(&http_origin, &tx_2); - info!( - "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}" - ); - info!( - "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}" - ); + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); - // at most one big tx per block and at most one big tx per stream, always. 
- assert_eq!(max_big_txs_per_microblock, 1); - assert_eq!(max_big_txs_per_block, 1); + eprintln!("Finished trying to mine a too-big tx"); - // if the mblock stream has a big tx, the anchored block won't (and vice versa) - // the changes for miner cost tracking (reset tracker between microblock and block, #2913) - // altered this test so that one more big tx ends up in an anchored block and one fewer - // ends up in a microblock - assert_eq!(total_big_txs_in_blocks, 2); - assert_eq!(total_big_txs_in_microblocks, 1); + let dropped_txs = test_observer::get_memtx_drops(); + assert_eq!(dropped_txs.len(), 1); + assert_eq!(&dropped_txs[0].1, "TooExpensive"); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -5485,178 +4819,94 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { #[test] #[ignore] -fn block_replay_integration_test() { +fn pox_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - let (mut conf, miner_account) = neon_integration_test_conf(); + let spender_2_sk = StacksPrivateKey::random(); + let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 100300, - }); + let spender_3_sk = StacksPrivateKey::random(); + let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; - conf.node.microblock_frequency = 5_000; + let pox_pubkey = Secp256k1PublicKey::from_hex( + "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", + ) + .unwrap(); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::random()); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); + + let pox_2_address = BitcoinAddress::from_bytes_legacy( + BitcoinNetworkType::Testnet, + LegacyBitcoinAddressType::PublicKeyHash, + &Hash160::from_node_public_key(&pox_2_pubkey).to_bytes(), + ) + .unwrap(); + + let (mut conf, miner_account) = neon_integration_test_conf(); test_observer::spawn(); test_observer::register_any(&mut conf); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + // required for testing post-sunset behavior + conf.node.always_use_affirmation_maps = false; - btc_regtest_controller.bootstrap_chain(201); + let first_bal = 6_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let second_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let third_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); + let stacked_bal = 1_000_000_000 * u128::from(core::MICROSTACKS_PER_STACKS); - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - - let channel = 
run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's query the miner's account nonce: - - info!("Miner account: {miner_account}"); - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 1); - - // and our spender - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); - - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - let tx = make_stacks_transfer( - &spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - &recipient.into(), - 1000, - ); - submit_tx(&http_origin, &tx); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // try and push the mined block back at the node lots of times - let (tip_consensus_hash, tip_block) = get_tip_anchored_block(&conf); - let mut tip_block_bytes = vec![]; - tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); - - for i in 0..1024 { - let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); - let res_text = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tip_block_bytes.clone()) - .send() - .unwrap() - .text() - .unwrap(); - - eprintln!("{i}: text of {path}\n{res_text}"); - } - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn cost_voting_integration() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - // let's make `<` free... - let cost_definer_src = " - (define-read-only (cost-definition-le (size uint)) - { - runtime: u0, write_length: u0, write_count: u0, read_count: u0, read_length: u0 - }) - "; - - // the contract that we'll test the costs of - let caller_src = " - (define-public (execute-2 (a uint)) - (ok (< a a))) - "; - - let power_vote_src = " - (define-public (propose-vote-confirm) - (let - ((proposal-id (unwrap-panic (contract-call? 'ST000000000000000000002AMW42H.cost-voting submit-proposal - 'ST000000000000000000002AMW42H.costs \"cost_le\" - .cost-definer \"cost-definition-le\"))) - (vote-amount (* u9000000000 u1000000))) - (try! (contract-call? 'ST000000000000000000002AMW42H.cost-voting vote-proposal proposal-id vote-amount)) - (try! (contract-call? 
'ST000000000000000000002AMW42H.cost-voting confirm-votes proposal-id)) - (ok proposal-id))) - "; + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: first_bal, + }); - let spender_sk = StacksPrivateKey::random(); - let spender_addr = to_addr(&spender_sk); - let spender_princ: PrincipalData = spender_addr.into(); + conf.initial_balances.push(InitialBalance { + address: spender_2_addr, + amount: second_bal, + }); - let (mut conf, miner_account) = neon_integration_test_conf(); + conf.initial_balances.push(InitialBalance { + address: spender_3_addr, + amount: third_bal, + }); conf.miner.microblock_attempt_time_ms = 1_000; conf.node.wait_time_for_microblocks = 0; conf.node.microblock_frequency = 1_000; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let spender_bal = 10_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - - conf.initial_balances.push(InitialBalance { - address: spender_princ.clone(), - amount: spender_bal, - }); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + + // reward cycle length = 15, so 10 reward cycle slots + 5 prepare-phase burns + let reward_cycle_len = 15; + let prepare_phase_len = 5; + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + (16 * reward_cycle_len - 1).into(), + (17 * reward_cycle_len).into(), + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); + burnchain_config.pox_constants = pox_constants.clone(); let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( conf.clone(), @@ -5667,6 +4917,7 @@ fn cost_voting_integration() { let http_origin = format!("http://{}", &conf.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); + let burnchain = burnchain_config.clone(); eprintln!("Chain bootstrapped..."); @@ -5688,3899 +4939,1961 @@ fn cost_voting_integration() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // let's query the miner's account nonce: - let res = get_account(&http_origin, &miner_account); - assert_eq!(res.balance, 0); - assert_eq!(res.nonce, 1); - - // and our spender: - let res = get_account(&http_origin, &spender_princ); - assert_eq!(res.balance, spender_bal as u128); - assert_eq!(res.nonce, 0); + let sort_height = channel.get_sortitions_processed(); - let transactions = vec![ - make_contract_publish( - &spender_sk, - 0, - 1000, - conf.burnchain.chain_id, - "cost-definer", - cost_definer_src, - ), - make_contract_publish( - &spender_sk, - 1, - 1000, - conf.burnchain.chain_id, - "caller", - caller_src, - ), - make_contract_publish( - &spender_sk, - 2, - 1000, - conf.burnchain.chain_id, - "voter", - power_vote_src, - ), - ]; + // let's query the miner's account nonce: + let account = get_account(&http_origin, &miner_account); + assert_eq!(account.balance, 0); + assert_eq!(account.nonce, 1); - for tx in transactions.into_iter() { - submit_tx(&http_origin, &tx); - } + // and our potential spenders: + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, first_bal as u128); + 
assert_eq!(account.nonce, 0); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let pox_info = get_pox_info(&http_origin).unwrap(); - let vote_tx = make_contract_call( - &spender_sk, - 3, - 1000, - conf.burnchain.chain_id, - &spender_addr, - "voter", - "propose-vote-confirm", - &[], + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 0); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 0); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 205); + assert_eq!(pox_info.next_cycle.min_increment_ustx, 1250710410920); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + let reward_cycle = burnchain + .block_height_to_reward_cycle(sort_height) + .expect("Expected to be able to get reward cycle"); + assert_eq!(pox_info.reward_cycle_id, reward_cycle); + assert_eq!(pox_info.current_cycle.id, reward_cycle); + assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); + assert_eq!( + pox_info.reward_cycle_length as u32, + pox_constants.reward_cycle_length ); + assert_eq!(pox_info.total_liquid_supply_ustx, 10005683287360023); + assert_eq!(pox_info.next_reward_cycle_in, 6); - let call_le_tx = make_contract_call( + let tx = make_contract_call( &spender_sk, - 4, - 1000, + 0, + 260, conf.burnchain.chain_id, - &spender_addr, - "caller", - "execute-2", - &[Value::UInt(1)], - ); - - submit_tx(&http_origin, &vote_tx); - submit_tx(&http_origin, &call_le_tx); + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal), + execute( + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), + ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt(sort_height as u128), + Value::UInt(6), + ], + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // okay, let's push that stacking transaction! 
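+    // the `stack-stx` arguments above, roughly: the amount to lock
+    // (1_000_000_000 STX in uSTX), a pox-addr tuple
+    // { version: 0x00, hashbytes: hash160(pox_pubkey) }, the burn height at
+    // which stacking starts, and a lock period of 6 reward cycles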
+ submit_tx(&http_origin, &tx); - // clear and mine another burnchain block, so that the new winner is seen by the observer - // (the observer is logically "one block behind" the miner + let mut sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut blocks = test_observer::get_blocks(); - // should have produced 1 new block - assert_eq!(blocks.len(), 1); - let block = blocks.pop().unwrap(); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - let mut tested = false; - let mut exec_cost = ExecutionCost::ZERO; - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "execute-2" { - exec_cost = - serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); - } else if contract_call.function_name.as_str() == "propose-vote-confirm" { - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - assert_eq!(parsed.to_string(), "(ok u0)"); - tested = true; - } - } + // now let's mine until the next reward cycle starts ... + while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); } - assert!(tested, "Should have found a contract call tx"); - - // try to confirm the passed vote (this will fail) - let confirm_proposal = make_contract_call( - &spender_sk, - 5, - 1000, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "cost-voting", - "confirm-miners", - &[Value::UInt(0)], - ); - submit_tx(&http_origin, &confirm_proposal); + let pox_info = get_pox_info(&http_origin).unwrap(); + let reward_cycle = burnchain + .block_height_to_reward_cycle(sort_height) + .expect("Expected to be able to get reward cycle"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); + assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + assert_eq!(pox_info.reward_cycle_id, reward_cycle); + assert_eq!(pox_info.current_cycle.id, 
reward_cycle);
+    assert_eq!(pox_info.next_cycle.id, reward_cycle + 1);
+    assert_eq!(
+        pox_info.reward_cycle_length as u32,
+        pox_constants.reward_cycle_length
+    );
+    assert_eq!(pox_info.next_reward_cycle_in, 14);
+
+    let blocks_observed = test_observer::get_blocks();
+    assert!(
+        blocks_observed.len() >= 2,
+        "Blocks observed {} should be >= 2",
+        blocks_observed.len()
+    );
+
+    // look up the return value of our stacking operation...
     let mut tested = false;
+    for block in blocks_observed.iter() {
+        if tested {
+            break;
+        }
+        let transactions = block.get("transactions").unwrap().as_array().unwrap();
+        for tx in transactions.iter() {
+            let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
+            if raw_tx == "0x00" {
+                continue;
+            }
+            let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+            let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+            if let TransactionPayload::ContractCall(contract_call) = parsed.payload {
+                eprintln!("{}", contract_call.function_name.as_str());
+                if contract_call.function_name.as_str() == "stack-stx" {
+                    let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
+                    let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap();
+                    // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle
+                    // 14, and locks for 6 reward cycles, so we unlock in reward cycle 20, which with a reward
+                    // cycle length of 15 blocks, is a burnchain height of 300)
+                    assert_eq!(parsed.to_string(),
+                               format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))"));
+                    tested = true;
+                }
+            }
+        }
+    }
+    assert!(tested, "Should have observed stack-stx transaction");
+
+    // let's stack with spender 2 and spender 3...
+
+    // now let's have spender_2 and spender_3 stack to pox address 2 in
+    // two different txs, and make sure that they sum together in the reward set.
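+    // each call below locks `stacked_bal / 2` (500_000_000 STX), so the two
+    // entries for pox_2's address should sum to the full 1_000_000_000 STX
+    // when the reward set is assembled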
+ + let tx = make_contract_call( + &spender_2_sk, + 0, + 260, conf.burnchain.chain_id, &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "cost-voting", - "confirm-miners", - &[Value::UInt(0)], + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal / 2), + execute( + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), + ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt(sort_height as u128), + Value::UInt(6), + ], ); - submit_tx(&http_origin, &confirm_proposal); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // clear and mine another burnchain block, so that the new winner is seen by the observer - // (the observer is logically "one block behind" the miner - test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut blocks = test_observer::get_blocks(); - // should have produced 1 new block - assert_eq!(blocks.len(), 1); - let block = blocks.pop().unwrap(); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); - let mut tested = false; - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "confirm-miners" { - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - assert_eq!(parsed.to_string(), "(ok true)"); - tested = true; - } - } - } - assert!(tested, "Should have found a contract call tx"); + // okay, let's push that stacking transaction! + submit_tx(&http_origin, &tx); - let call_le_tx = make_contract_call( - &spender_sk, - 7, - 1000, + let tx = make_contract_call( + &spender_3_sk, + 0, + 260, conf.burnchain.chain_id, - &spender_addr, - "caller", - "execute-2", - &[Value::UInt(1)], + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "pox", + "stack-stx", + &[ + Value::UInt(stacked_bal / 2), + execute( + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), + ClarityVersion::Clarity1, + ) + .unwrap() + .unwrap(), + Value::UInt(sort_height as u128), + Value::UInt(6), + ], ); - submit_tx(&http_origin, &call_le_tx); + submit_tx(&http_origin, &tx); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // clear and mine another burnchain block, so that the new winner is seen by the observer - // (the observer is logically "one block behind" the miner - test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // mine until the end of the current reward cycle. 
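+    // with a reward cycle length of 15, cycle N ends at burn height
+    // 15 * (N + 1) - 1, so this loop should stop at 15 * 15 - 1 = 224, the
+    // last burn block of the current cycle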
+ sort_height = channel.get_sortitions_processed(); + while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - let mut blocks = test_observer::get_blocks(); - // should have produced 1 new block - assert_eq!(blocks.len(), 1); - let block = blocks.pop().unwrap(); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); + let pox_info = get_pox_info(&http_origin).unwrap(); - let mut tested = false; - let mut new_exec_cost = ExecutionCost::max_value(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "execute-2" { - new_exec_cost = - serde_json::from_value(tx.get("execution_cost").cloned().unwrap()).unwrap(); - tested = true; - } - } - } - assert!(tested, "Should have found a contract call tx"); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert_eq!(pox_info.first_burnchain_block_height, 0); + assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); + assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); + assert!(!pox_info.current_cycle.is_pox_active); + assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); + assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); + assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); + assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); + assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); + assert_eq!( + pox_info.prepare_cycle_length as u32, + pox_constants.prepare_length + ); + assert_eq!( + pox_info.rejection_fraction, + Some(pox_constants.pox_rejection_fraction) + ); + assert_eq!(pox_info.reward_cycle_id, 14); + assert_eq!(pox_info.current_cycle.id, 14); + assert_eq!(pox_info.next_cycle.id, 15); + assert_eq!( + pox_info.reward_cycle_length as u32, + pox_constants.reward_cycle_length + ); + assert_eq!(pox_info.next_reward_cycle_in, 1); - assert!(exec_cost.exceeds(&new_exec_cost)); + // we should have received _no_ Bitcoin commitments, because the pox participation threshold + // was not met! + let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 0, + "Should have received no outputs during PoX reward cycle" + ); + // let's test the reward information in the observer test_observer::clear(); - channel.stop_chains_coordinator(); -} -#[test] -#[ignore] -fn mining_events_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; + // before sunset + // mine until the end of the next reward cycle, + // the participation threshold now should be met. 
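+    // next_cycle.stacked_ustx was asserted above at 2_000_000_000_000_000 uSTX,
+    // which should clear the pox_activation_threshold_ustx (itself asserted
+    // earlier to exceed 1_500_000_000_000_000 uSTX)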
+    while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() {
+        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        sort_height = channel.get_sortitions_processed();
+        eprintln!("Sort height: {sort_height}");
+    }
+
+    let pox_info = get_pox_info(&http_origin).unwrap();
+
+    assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox");
+    assert_eq!(pox_info.first_burnchain_block_height, 0);
+    assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000);
+    assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000);
+    assert!(pox_info.current_cycle.is_pox_active);
+    assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240);
+    assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235);
+    assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4);
+    assert_eq!(pox_info.next_reward_cycle_in, 1);
+
+    // we should have received _seven_ Bitcoin commitments, because our commitment was 7 * threshold
+    let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey);
+
+    eprintln!("Got UTXOs: {}", utxos.len());
+    assert_eq!(
+        utxos.len(),
+        7,
+        "Should have received outputs during PoX reward cycle"
+    );
+
+    // we should have received _seven_ Bitcoin commitments to pox_2_pubkey, because our commitment was 7 * threshold
+    // note that if the reward set "summing" isn't implemented, this recipient would only have received _6_ slots,
+    // because each `stack-stx` call only received enough to get 3 slots individually.
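+    // rough slot math against min_threshold_ustx = 125_080_000_000_000:
+    //   each half:  500_000_000_000_000 / 125_080_000_000_000 ~= 3.99 -> 3 slots
+    //   summed:   1_000_000_000_000_000 / 125_080_000_000_000 ~= 7.99 -> 7 slots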
+    let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey);
+
+    eprintln!("Got UTXOs: {}", utxos.len());
+    assert_eq!(
+        utxos.len(),
+        7,
+        "Should have received outputs during PoX reward cycle"
+    );
+
+    let burn_blocks = test_observer::get_burn_blocks();
+    let mut recipient_slots: HashMap<String, u64> = HashMap::new();
+
+    for block in burn_blocks.iter() {
+        let reward_slot_holders = block
+            .get("reward_slot_holders")
+            .unwrap()
+            .as_array()
+            .unwrap()
+            .iter()
+            .map(|x| x.as_str().unwrap().to_string());
+        for holder in reward_slot_holders {
+            if let Some(current) = recipient_slots.get_mut(&holder) {
+                *current += 1;
+            } else {
+                recipient_slots.insert(holder, 1);
+            }
+        }
+    }
+
+    let pox_1_address = BitcoinAddress::from_bytes_legacy(
+        BitcoinNetworkType::Testnet,
+        LegacyBitcoinAddressType::PublicKeyHash,
+        &Hash160::from_node_public_key(&pox_pubkey).to_bytes(),
+    )
+    .unwrap();
+
+    assert_eq!(recipient_slots.len(), 2);
+    assert_eq!(
+        recipient_slots.get(&format!("{pox_2_address}")).cloned(),
+        Some(7u64)
+    );
+    assert_eq!(
+        recipient_slots.get(&format!("{pox_1_address}")).cloned(),
+        Some(7u64)
+    );
+
+    // get the canonical chain tip
+    let tip_info = get_chain_info(&conf);
+
+    eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height);
+    assert_eq!(tip_info.stacks_tip_height, 36);
+
+    // now let's mine into the sunset
+    while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() {
+        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+        sort_height = channel.get_sortitions_processed();
+        eprintln!("Sort height: {sort_height}");
+    }
+
+    // get the canonical chain tip
+    let tip_info = get_chain_info(&conf);
+
+    eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height);
+    assert_eq!(tip_info.stacks_tip_height, 51);
+
+    let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey);
+
+    // should receive more rewards during this cycle...
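+    // the PoxConstants above presumably place the sunset window at burn heights
+    // 16 * 15 - 1 = 239 through 17 * 15 = 255, so this cycle still pays rewards
+    // while sunsetting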
+ eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 14, + "Should have received more outputs during the sunsetting PoX reward cycle" + ); - thread::spawn(move || run_loop.start(None, 0)); + // and after sunset + while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); + let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // should *not* receive more rewards during the after sunset cycle... + eprintln!("Got UTXOs: {}", utxos.len()); + assert_eq!( + utxos.len(), + 14, + "Should have received no more outputs after sunset PoX reward cycle" + ); - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // should have progressed the chain, though! + // get the canonical chain tip + let tip_info = get_chain_info(&conf); - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - submit_tx(&http_origin, &tx); // should succeed - submit_tx(&http_origin, &tx_2); // should fail since it tries to publish contract with same name - submit_tx(&http_origin, &mb_tx); // should be in microblock bc it is microblock only - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // check that the nonces have gone up - let res = get_account(&http_origin, &addr); - assert_eq!(res.nonce, 1); - - let res = get_account(&http_origin, &addr_2); - assert_eq!(res.nonce, 1); - - // check mined microblock events - let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(!mined_microblock_events.is_empty()); - - // check tx events in the first microblock - // 1 success: 1 contract publish, 2 error (on chain transactions) - let microblock_tx_events = &mined_microblock_events[0].tx_events; - assert_eq!(microblock_tx_events.len(), 1); - - // contract publish - match µblock_tx_events[0] { - TransactionEvent::Success(TransactionSuccessEvent { - result, - fee, - execution_cost, - .. - }) => { - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - assert_eq!(fee, &620000); - assert_eq!( - execution_cost, - &ExecutionCost { - write_length: 35, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 311000 - } - ) - } - _ => panic!("unexpected event type"), - } - - // check mined block events - let mined_block_events = test_observer::get_mined_blocks(); - assert!(mined_block_events.len() >= 3); - - // check the tx events in the third mined block - // 2 success: 1 coinbase tx event + 1 contract publish, 1 error (duplicate contract) - let third_block_tx_events = &mined_block_events[2].tx_events; - assert_eq!(third_block_tx_events.len(), 3); - - // coinbase event - match &third_block_tx_events[0] { - TransactionEvent::Success(TransactionSuccessEvent { txid, result, .. 
}) => { - assert_eq!( - txid.to_string(), - "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" - ); - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - } - _ => panic!("unexpected event type"), - } - - // contract publish event - match &third_block_tx_events[1] { - TransactionEvent::Success(TransactionSuccessEvent { - result, - fee, - execution_cost, - .. - }) => { - assert!(result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap()); - assert_eq!(fee, &600000); - assert_eq!( - execution_cost, - &ExecutionCost { - write_length: 35, - write_count: 2, - read_length: 1, - read_count: 1, - runtime: 311000 - } - ) - } - _ => panic!("unexpected event type"), - } - - // dupe contract error event - match &third_block_tx_events[2] { - TransactionEvent::ProcessingError(TransactionErrorEvent { txid: _, error }) => { - assert_eq!( - error, - "Duplicate contract 'ST3WM51TCWMJYGZS1QFMC28DH5YP86782YGR113C1.small'" - ); - } - _ => panic!("unexpected event type"), - } + eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); + assert_eq!(tip_info.stacks_tip_height, 66); test_observer::clear(); channel.stop_chains_coordinator(); } -/// This test checks that the limit behavior in the miner works as expected for anchored block -/// building. When we first hit the block limit, the limit behavior switches to -/// `CONTRACT_LIMIT_HIT`, during which stx transfers are still allowed, and contract related -/// transactions are skipped. -/// Note: the test is sensitive to the order in which transactions are mined; it is written -/// expecting that transactions are traversed in the order tx_1, tx_2, tx_3, and tx_4. +#[derive(Debug)] +enum Signal { + BootstrapNodeReady, + FollowerNodeReady, + ReplicatingAttachmentsStartTest1, + ReplicatingAttachmentsCheckTest1(u64), + ReplicatingAttachmentsStartTest2, + ReplicatingAttachmentsCheckTest2(u64), +} + #[test] #[ignore] -fn block_limit_hit_integration_test() { +fn atlas_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - // 700 invocations - let max_contract_src = format!( - "(define-private (work) (begin {} 1)) - (define-private (times-100) (begin {} 1)) - (define-private (times-200) (begin (times-100) (times-100) 1)) - (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) - (times-500) (times-200)", - (0..10) - .map(|_| format!( - "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::>() - .join(" "), - (0..10) - .map(|_| "(work)".to_string()) - .collect::>() - .join(" "), - ); + let user_1 = StacksPrivateKey::random(); + let initial_balance_user_1 = InitialBalance { + address: to_addr(&user_1).into(), + amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), + }; - // 2900 invocations - let oversize_contract_src = format!( - "(define-private (work) (begin {} 1)) - (define-private (times-100) (begin {} 1)) - (define-private (times-200) (begin (times-100) (times-100) 1)) - (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) - (define-private (times-1000) (begin (times-500) (times-500) 1)) - (times-1000) (times-1000) (times-500) (times-200) (times-200)", - (0..10) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::>() - .join(" "), - (0..10) - .map(|_| "(work)".to_string()) - .collect::>() - .join(" "), - ); + // Prepare the config of the bootstrap node + let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); + let bootstrap_node_public_key = { + let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); + let mut pk = keychain.generate_op_signer().get_public_key(); + pk.set_compressed(true); + pk.to_hex() + }; + conf_bootstrap_node + .initial_balances + .push(initial_balance_user_1.clone()); - let spender_sk = StacksPrivateKey::random(); - let addr = to_addr(&spender_sk); - let second_spender_sk = StacksPrivateKey::random(); - let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); - let third_spender_sk = StacksPrivateKey::random(); - let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); + conf_bootstrap_node.node.always_use_affirmation_maps = false; - let (mut conf, _miner_account) = neon_integration_test_conf(); + // Prepare the config of the follower node + let (mut conf_follower_node, _) = neon_integration_test_conf(); + let bootstrap_node_url = format!( + "{}@{}", + bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + ); + conf_follower_node.node.set_bootstrap_nodes( + bootstrap_node_url, + conf_follower_node.burnchain.chain_id, + conf_follower_node.burnchain.peer_version, + ); + conf_follower_node.node.miner = false; + conf_follower_node + .initial_balances + .push(initial_balance_user_1.clone()); + conf_follower_node + .events_observers + .insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, + disable_retries: false, + }); - conf.initial_balances.push(InitialBalance { - address: addr.into(), - amount: 10_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: second_spender_addr.clone(), - amount: 10_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: third_spender_addr.clone(), - amount: 10_000_000, - }); + conf_follower_node.node.always_use_affirmation_maps = false; - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; - conf.node.microblock_frequency = 1000; + // Our 2 nodes will share the bitcoind node + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); + let (follower_node_tx, follower_node_rx) = mpsc::channel(); - // included in first block - let tx = make_contract_publish( - &spender_sk, - 0, - 555_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 555_000, - conf.burnchain.chain_id, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish( - &second_spender_sk, - 0, - 150_000, - conf.burnchain.chain_id, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer( - &third_spender_sk, - 0, - 180, - 
conf.burnchain.chain_id, - &PrincipalData::from(addr), - 100, - ); - - test_observer::spawn(); - test_observer::register_any(&mut conf); + let bootstrap_node_thread = thread::spawn(move || { + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + btc_regtest_controller.bootstrap_chain(201); - btc_regtest_controller.bootstrap_chain(201); + eprintln!("Chain bootstrapped..."); - eprintln!("Chain bootstrapped..."); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); + let channel = run_loop.get_coordinator_channel().unwrap(); - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - let channel = run_loop.get_coordinator_channel().unwrap(); + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); - thread::spawn(move || run_loop.start(None, 0)); + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Let's setup the follower now. + follower_node_tx + .send(Signal::BootstrapNodeReady) + .expect("Unable to send signal"); - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + match bootstrap_node_rx.recv() { + Ok(Signal::ReplicatingAttachmentsStartTest1) => { + println!("Follower node is ready..."); + } + _ => panic!("Bootstrap node could nod boot. 
Aborting test."), + }; - // submit all the transactions - let txid_1 = submit_tx(&http_origin, &tx); - let txid_2 = submit_tx(&http_origin, &tx_2); - let txid_3 = submit_tx(&http_origin, &tx_3); - let txid_4 = submit_tx(&http_origin, &tx_4); + // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - sleep_ms(5_000); + // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) + // (stx-to-burn uint)) + let namespace = "passport"; + let salt = "some-salt"; + let salted_namespace = format!("{namespace}{salt}"); + let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); + let tx_1 = make_contract_call( + &user_1, + 0, + 260, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-preorder", + &[ + Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), + Value::UInt(1000000000), + ], + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_1.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_1[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); + // (define-public (namespace-reveal (namespace (buff 20)) + // (namespace-salt (buff 20)) + // (p-func-base uint) + // (p-func-coeff uint) + // (p-func-b1 uint) + // (p-func-b2 uint) + // (p-func-b3 uint) + // (p-func-b4 uint) + // (p-func-b5 uint) + // (p-func-b6 uint) + // (p-func-b7 uint) + // (p-func-b8 uint) + // (p-func-b9 uint) + // (p-func-b10 uint) + // (p-func-b11 uint) + // (p-func-b12 uint) + // (p-func-b13 uint) + // (p-func-b14 uint) + // (p-func-b15 uint) + // (p-func-b16 uint) + // (p-func-non-alpha-discount uint) + // (p-func-no-vowel-discount uint) + // (lifetime uint) + // (namespace-import principal)) + let tx_2 = make_contract_call( + &user_1, + 1, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-reveal", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1000), + Value::Principal(initial_balance_user_1.address.clone()), + ], + ); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_2.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_2[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + 
eprintln!("{}", res.text().unwrap()); + panic!(""); + } - let res = get_account(&http_origin, &addr); - assert_eq!(res.nonce, 2); + // (define-public (name-import (namespace (buff 20)) + // (name (buff 48)) + // (zonefile-hash (buff 20))) + let zonefile_hex = "facade00"; + let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); + let tx_3 = make_contract_call( + &user_1, + 2, + 500, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-import", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from("johndoe".as_bytes().to_vec()).unwrap(), + Value::Principal(to_addr(&user_1).into()), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - let res = get_account(&http_origin, &second_spender_addr); - assert_eq!(res.nonce, 1); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_3), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - let res = get_account(&http_origin, &third_spender_addr); - assert_eq!(res.nonce, 1); - - let mined_block_events = test_observer::get_blocks(); - assert_eq!(mined_block_events.len(), 5); - - let tx_third_block = mined_block_events[3] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_third_block.len(), 3); - let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); - let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_1}"), txid_1_exp); - assert_eq!(format!("0x{txid_4}"), txid_4_exp); - - let tx_fourth_block = mined_block_events[4] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_fourth_block.len(), 3); - let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); - let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_2}"), txid_2_exp); - assert_eq!(format!("0x{txid_3}"), txid_3_exp); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -/// This test checks that the limit behavior in the miner works as expected during microblock -/// building. When we first hit the block limit, the limit behavior switches to -/// `CONTRACT_LIMIT_HIT`, during which stx transfers are still allowed, and contract related -/// transactions are skipped. -/// Note: the test is sensitive to the order in which transactions are mined; it is written -/// expecting that transactions are traversed in the order tx_1, tx_2, tx_3, and tx_4. -#[test] -#[ignore] -fn microblock_limit_hit_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let max_contract_src = format!( - "(define-private (work) (begin {} 1)) - (define-private (times-100) (begin {} 1)) - (define-private (times-200) (begin (times-100) (times-100) 1)) - (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) - (times-500) (times-200)", - (0..3) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" "), - (0..3) - .map(|_| "(work)".to_string()) - .collect::<Vec<String>>() - .join(" "), - ); - - let oversize_contract_src = format!( - "(define-private (work) (begin {} 1)) - (define-private (times-100) (begin {} 1)) - (define-private (times-200) (begin (times-100) (times-100) 1)) - (define-private (times-500) (begin (times-200) (times-200) (times-100) 1)) - (define-private (times-1000) (begin (times-500) (times-500) 1)) - (times-1000) (times-1000) (times-500) (times-200) (times-200)", - (0..3) - .map(|_| format!( - "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") 2)", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" "), - (0..3) - .map(|_| "(work)".to_string()) - .collect::<Vec<String>>() - .join(" "), - ); - - let spender_sk = StacksPrivateKey::random(); - let addr = to_addr(&spender_sk); - let second_spender_sk = StacksPrivateKey::random(); - let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); - let third_spender_sk = StacksPrivateKey::random(); - let third_spender_addr: PrincipalData = to_addr(&third_spender_sk).into(); - - let (mut conf, _) = neon_integration_test_conf(); - - conf.initial_balances.push(InitialBalance { - address: addr.into(), - amount: 10_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: second_spender_addr.clone(), - amount: 10_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: third_spender_addr.clone(), - amount: 10_000_000, - }); - - conf.node.mine_microblocks = true; - // conf.node.wait_time_for_microblocks = 30000; - conf.node.wait_time_for_microblocks = 1000; - conf.node.microblock_frequency = 1000; - - conf.miner.microblock_attempt_time_ms = i64::MAX as u64; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - conf.burnchain.epochs = Some(EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch10, - start_height: 0, - end_height: 0, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_1_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 10_000, - block_limit: ExecutionCost { - write_length: 150000000, - write_count: 50000, - read_length: 1000000000, - read_count: 5000, // make read_count smaller so we hit the read_count limit with a smaller tx. 
- runtime: 100_000_000_000, - }, - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 10_000, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); - - // included in the first block - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 555_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - // contract limit hit; included in second block - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 555_000, - conf.burnchain.chain_id, - "over-2", - &oversize_contract_src, - ); - // skipped over since contract limit was hit; included in second block - let tx_3 = make_contract_publish_microblock_only( - &second_spender_sk, - 0, - 150_000, - conf.burnchain.chain_id, - "max", - &max_contract_src, - ); - // included in first block - let tx_4 = make_stacks_transfer_mblock_only( - &third_spender_sk, - 0, - 180, - conf.burnchain.chain_id, - &PrincipalData::from(addr), - 100, - ); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
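The Epoch20 entry above deliberately shrinks `read_count` to 5,000 so the block limit trips on a much smaller transaction than mainnet would allow. As a rough, std-only sketch of the multi-dimensional budget check this exercises (a simplified stand-in with hypothetical numbers, not the node's actual `ExecutionCost` logic):

    // Simplified stand-in for the node's block-limit accounting; the field
    // names mirror the epoch config above, but this is illustrative only.
    struct ExecutionCost {
        write_length: u64,
        write_count: u64,
        read_length: u64,
        read_count: u64,
        runtime: u64,
    }

    impl ExecutionCost {
        // Would adding `tx` on top of `so_far` stay within `limit` on every axis?
        fn fits(so_far: &Self, tx: &Self, limit: &Self) -> bool {
            so_far.write_length + tx.write_length <= limit.write_length
                && so_far.write_count + tx.write_count <= limit.write_count
                && so_far.read_length + tx.read_length <= limit.read_length
                && so_far.read_count + tx.read_count <= limit.read_count
                && so_far.runtime + tx.runtime <= limit.runtime
        }
    }

    fn main() {
        // The Epoch20 limit from the test config; read_count is the tight axis.
        let limit = ExecutionCost { write_length: 150_000_000, write_count: 50_000, read_length: 1_000_000_000, read_count: 5_000, runtime: 100_000_000_000 };
        let so_far = ExecutionCost { write_length: 0, write_count: 0, read_length: 0, read_count: 4_500, runtime: 0 };
        // Hypothetical contract-call costs chosen so that only read_count overflows.
        let tx = ExecutionCost { write_length: 10, write_count: 10, read_length: 1_000, read_count: 900, runtime: 1_000 };
        assert!(!ExecutionCost::fits(&so_far, &tx, &limit));
    }

Once a contract transaction fails a check like this, the doc comment above expects the miner to switch to `CONTRACT_LIMIT_HIT`, keep admitting plain STX transfers, and skip further contract-related transactions.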
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // submit all the transactions - let txid_1 = submit_tx(&http_origin, &tx); - let txid_2 = submit_tx(&http_origin, &tx_2); - let txid_3 = submit_tx(&http_origin, &tx_3); - let txid_4 = submit_tx(&http_origin, &tx_4); - - eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}"); - - sleep_ms(50_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(50_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(50_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(50_000); - - loop { - let res = get_account(&http_origin, &addr); - if res.nonce < 2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(50_000); - } else { - break; - } - } - - let res = get_account(&http_origin, &addr); - assert_eq!(res.nonce, 2); - - let res = get_account(&http_origin, &second_spender_addr); - assert_eq!(res.nonce, 1); - - let res = get_account(&http_origin, &third_spender_addr); - assert_eq!(res.nonce, 1); - - let mined_mblock_events = test_observer::get_microblocks(); - assert!(mined_mblock_events.len() >= 2); - - let tx_first_mblock = mined_mblock_events[0] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_first_mblock.len(), 2); - let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap(); - let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_1}"), txid_1_exp); - assert_eq!(format!("0x{txid_4}"), txid_4_exp); - - let tx_second_mblock = mined_mblock_events[1] - .get("transactions") - .unwrap() - .as_array() - .unwrap(); - assert_eq!(tx_second_mblock.len(), 2); - let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap(); - let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{txid_2}"), txid_2_exp); - assert_eq!(format!("0x{txid_3}"), txid_3_exp); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn block_large_tx_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let small_contract_src = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..700) - .map(|_| format!( - "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" ") - ); - - let oversize_contract_src = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..3500) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" ") - ); - - let spender_sk = StacksPrivateKey::random(); - let spender_addr = to_addr(&spender_sk); - - let (mut conf, miner_account) = neon_integration_test_conf(); - test_observer::spawn(); - test_observer::register_any(&mut conf); - - conf.initial_balances.push(InitialBalance { - address: spender_addr.into(), - amount: 10000000, - }); - - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; - conf.node.microblock_frequency = 1000; - - conf.miner.microblock_attempt_time_ms = i64::MAX as u64; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - // higher fee for tx means it will get mined first - let tx = make_contract_publish( - &spender_sk, - 0, - 671_000, - conf.burnchain.chain_id, - "small", - &small_contract_src, - ); - let tx_2 = make_contract_publish( - &spender_sk, - 1, - 670_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10000000); - - let normal_txid = submit_tx(&http_origin, &tx); - let huge_txid = submit_tx(&http_origin, &tx_2); - - eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); - next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); - - eprintln!("Finished trying to mine a too-big tx"); - - let dropped_txs = test_observer::get_memtx_drops(); - assert_eq!(dropped_txs.len(), 1); - assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -#[allow(non_snake_case)] -fn microblock_large_tx_integration_test_FLAKY() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let small_contract_src = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..700) - .map(|_| format!( - "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" ") - ); - - // publishing this contract takes up >80% of the read_count budget (which is 50000) - let oversize_contract_src = format!( - "(define-public (f) (begin {} (ok 1))) (begin (f))", - (0..3500) - .map(|_| format!( - "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", - boot_code_id("cost-voting", false), - boot_code_id("costs", false), - boot_code_id("costs", false), - )) - .collect::<Vec<String>>() - .join(" ") - ); - - let spender_sk = StacksPrivateKey::random(); - let addr = to_addr(&spender_sk); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - conf.initial_balances.push(InitialBalance { - address: addr.into(), - amount: 10000000, - }); - - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; - conf.node.microblock_frequency = 1000; - - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; - - let tx = make_contract_publish_microblock_only( - &spender_sk, - 0, - 150_000, - conf.burnchain.chain_id, - "small", - &small_contract_src, - ); - let tx_2 = make_contract_publish_microblock_only( - &spender_sk, - 1, - 670_000, - conf.burnchain.chain_id, - "over", - &oversize_contract_src, - ); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.nonce, 1); - assert_eq!(account.balance, 0); - - let account = get_account(&http_origin, &addr); - assert_eq!(account.nonce, 0); - assert_eq!(account.balance, 10000000); - - submit_tx(&http_origin, &tx); - let huge_txid = submit_tx(&http_origin, &tx_2); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Check that the microblock contains the first tx. 
- let microblock_events = test_observer::get_microblocks(); - assert!(!microblock_events.is_empty()); - - let microblock = microblock_events[0].clone(); - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); - assert_eq!(transactions.len(), 1); - let status = transactions[0].get("status").unwrap().as_str().unwrap(); - assert_eq!(status, "success"); - - // Check that the tx that triggered TransactionTooLargeError when being processed is dropped - // from the mempool. - let dropped_txs = test_observer::get_memtx_drops(); - assert_eq!(dropped_txs.len(), 1); - assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn pox_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sk = StacksPrivateKey::random(); - let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - - let spender_2_sk = StacksPrivateKey::random(); - let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into(); - - let spender_3_sk = StacksPrivateKey::random(); - let spender_3_addr: PrincipalData = to_addr(&spender_3_sk).into(); - - let pox_pubkey = Secp256k1PublicKey::from_hex( - "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", - ) - .unwrap(); - let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); - - let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::random()); - let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); - - let pox_2_address = BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_node_public_key(&pox_2_pubkey).to_bytes(), - ) - .unwrap(); - - let (mut conf, miner_account) = neon_integration_test_conf(); - - test_observer::spawn(); - test_observer::register_any(&mut conf); - - // required for testing post-sunset behavior - conf.node.always_use_affirmation_maps = false; - - let first_bal = 6_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let second_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let third_bal = 2_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS); - let stacked_bal = 1_000_000_000 * u128::from(core::MICROSTACKS_PER_STACKS); - - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: first_bal, - }); - - conf.initial_balances.push(InitialBalance { - address: spender_2_addr, - amount: second_bal, - }); - - conf.initial_balances.push(InitialBalance { - address: spender_3_addr, - amount: third_bal, - }); - - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 1_000; - conf.burnchain.max_rbf = 10_000_000; - conf.node.wait_time_for_blocks = 1_000; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - // reward cycle length = 15, so 10 reward cycle slots + 5 prepare-phase burns - let reward_cycle_len = 15; - let prepare_phase_len = 5; - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 4 * prepare_phase_len / 5, - 5, - 15, - (16 * reward_cycle_len - 1).into(), - (17 * reward_cycle_len).into(), - u32::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - ); - 
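With `reward_cycle_len = 15` and `prepare_phase_len = 5`, every burn-height target the test mines toward is plain cycle arithmetic. A minimal sketch with hypothetical helper names (the real logic sits behind `Burnchain::block_height_to_reward_cycle`), taking `first_burnchain_block_height = 0` as the test asserts:

    // Hypothetical helpers mirroring the regtest cycle arithmetic.
    fn block_height_to_reward_cycle(height: u64, cycle_len: u64) -> u64 {
        height / cycle_len
    }

    fn is_in_prepare_phase(height: u64, cycle_len: u64, prepare_len: u64) -> bool {
        // The last `prepare_len` burns of each cycle are the prepare phase.
        height % cycle_len >= cycle_len - prepare_len
    }

    fn main() {
        let (cycle_len, prepare_len) = (15u64, 5u64);
        // The test later mines until sort_height passes 14 * 15 + 1 = 211.
        assert_eq!(block_height_to_reward_cycle(14 * cycle_len + 1, cycle_len), 14);
        // Heights 205..209 are prepare-phase burns, matching the asserted
        // prepare_phase_start_block_height of 205 (and reward phase at 210).
        assert!(is_in_prepare_phase(205, cycle_len, prepare_len));
        assert!(!is_in_prepare_phase(204, cycle_len, prepare_len));
    }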
burnchain_config.pox_constants = pox_constants.clone(); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - let burnchain = burnchain_config.clone(); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let sort_height = channel.get_sortitions_processed(); - - // let's query the miner's account nonce: - let account = get_account(&http_origin, &miner_account); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 1); - - // and our potential spenders: - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, first_bal as u128); - assert_eq!(account.nonce, 0); - - let pox_info = get_pox_info(&http_origin).unwrap(); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 0); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 205); - assert_eq!(pox_info.next_cycle.min_increment_ustx, 1250710410920); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - let reward_cycle = burnchain - .block_height_to_reward_cycle(sort_height) - .expect("Expected to be able to get reward cycle"); - assert_eq!(pox_info.reward_cycle_id, reward_cycle); - assert_eq!(pox_info.current_cycle.id, reward_cycle); - assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.total_liquid_supply_ustx, 10005683287360023); - assert_eq!(pox_info.next_reward_cycle_in, 6); - - let tx = make_contract_call( - &spender_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal), - execute( - &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - // okay, let's push that stacking transaction! 
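The `hashbytes` spliced into the `stack-stx` tuple above is the Hash160 of the reward public key, hex-encoded, while the `version: 0x00` byte selects the legacy p2pkh reward-address mode. A sketch of the derivation, reusing the helpers this file already calls (the crate paths here are assumptions; the test file pulls them in through its own use list):

    use stacks_common::util::hash::{bytes_to_hex, Hash160};
    use stacks_common::util::secp256k1::Secp256k1PublicKey;

    // Produces the hex spliced into "{ hashbytes: 0x..., version: 0x00 }".
    fn pox_hashbytes_hex(pubkey: &Secp256k1PublicKey) -> String {
        bytes_to_hex(&Hash160::from_node_public_key(pubkey).to_bytes())
    }

This is the same expression used for `pox_pubkey_hash` and `pox_2_pubkey_hash` above, just factored into a function for clarity.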
- submit_tx(&http_origin, &tx); - - let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - test_observer::clear(); - - // now let's mine until the next reward cycle starts ... - while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let pox_info = get_pox_info(&http_origin).unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle(sort_height) - .expect("Expected to be able to get reward cycle"); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - assert_eq!(pox_info.reward_cycle_id, reward_cycle); - assert_eq!(pox_info.current_cycle.id, reward_cycle); - assert_eq!(pox_info.next_cycle.id, reward_cycle + 1); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.next_reward_cycle_in, 14); - - let blocks_observed = test_observer::get_blocks(); - assert!( - blocks_observed.len() >= 2, - "Blocks observed {} should be >= 2", - blocks_observed.len() - ); - - // look up the return value of our stacking operation... - let mut tested = false; - for block in blocks_observed.iter() { - if tested { - break; - } - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(contract_call) = parsed.payload { - eprintln!("{}", contract_call.function_name.as_str()); - if contract_call.function_name.as_str() == "stack-stx" { - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle - // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward - // cycle length of 15 blocks, is a burnchain height of 300) - assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); - tested = true; - } - } - } - } - - assert!(tested, "Should have observed stack-stx transaction"); - - // let's stack with spender 2 and spender 3... 
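The `unlock-burn-height u300` asserted above follows directly from the comment's cycle math; a worked check under the same assumptions (lockup active from cycle 14, `lock-period` of 6, 15-block cycles):

    fn main() {
        let cycle_len: u64 = 15;
        let first_active_cycle: u64 = 14; // lockup starts in reward cycle 14
        let lock_period: u64 = 6;         // the Value::UInt(6) argument to stack-stx
        // Unlocks at the first burn block of cycle 14 + 6 = 20.
        assert_eq!((first_active_cycle + lock_period) * cycle_len, 300);
    }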
- - // now let's have sender_2 and sender_3 stack to pox spender_addr 2 in - // two different txs, and make sure that they sum together in the reward set. - - let tx = make_contract_call( - &spender_2_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal / 2), - execute( - &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - // okay, let's push that stacking transaction! - submit_tx(&http_origin, &tx); - - let tx = make_contract_call( - &spender_3_sk, - 0, - 260, - conf.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox", - "stack-stx", - &[ - Value::UInt(stacked_bal / 2), - execute( - &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), - ClarityVersion::Clarity1, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - submit_tx(&http_origin, &tx); - - // mine until the end of the current reward cycle. - sort_height = channel.get_sortitions_processed(); - while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let pox_info = get_pox_info(&http_origin).unwrap(); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert!(!pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 220); - assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); - assert_eq!( - pox_info.prepare_cycle_length as u32, - pox_constants.prepare_length - ); - assert_eq!( - pox_info.rejection_fraction, - Some(pox_constants.pox_rejection_fraction) - ); - assert_eq!(pox_info.reward_cycle_id, 14); - assert_eq!(pox_info.current_cycle.id, 14); - assert_eq!(pox_info.next_cycle.id, 15); - assert_eq!( - pox_info.reward_cycle_length as u32, - pox_constants.reward_cycle_length - ); - assert_eq!(pox_info.next_reward_cycle_in, 1); - - // we should have received _no_ Bitcoin commitments, because the pox participation threshold - // was not met! - let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 0, - "Should have received no outputs during PoX reward cycle" - ); - - // let's test the reward information in the observer - test_observer::clear(); - - // before sunset - // mine until the end of the next reward cycle, - // the participation threshold now should be met. 
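The reason the two half-sized lockups must "sum together in the reward set", and the reason seven reward slots appear in the assertions that follow, is floor division against the slot threshold: summed per reward address first, the two halves clear one more slot than they would counted separately. A quick check against the `min_threshold_ustx` value the test asserts:

    fn slots(stacked_ustx: u128, threshold_ustx: u128) -> u128 {
        stacked_ustx / threshold_ustx // integer division floors
    }

    fn main() {
        let threshold: u128 = 125_080_000_000_000; // min_threshold_ustx asserted above
        let half: u128 = 500_000_000_000_000;      // stacked_bal / 2 per stacker
        // Counted separately, each stack-stx earns 3 slots: 6 in total.
        assert_eq!(slots(half, threshold) * 2, 6);
        // Summed per reward address first, the combined stack earns 7.
        assert_eq!(slots(2 * half, threshold), 7);
    }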
- while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let pox_info = get_pox_info(&http_origin).unwrap(); - - assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); - assert_eq!(pox_info.first_burnchain_block_height, 0); - assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); - assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); - assert!(pox_info.current_cycle.is_pox_active); - assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); - assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); - assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); - assert_eq!(pox_info.next_reward_cycle_in, 1); - - // we should have received _seven_ Bitcoin commitments, because our commitment was 7 * threshold - let utxos = btc_regtest_controller.get_all_utxos(&pox_pubkey); - - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 7, - "Should have received outputs during PoX reward cycle" - ); - - // we should have received _seven_ Bitcoin commitments to pox_2_pubkey, because our commitment was 7 * threshold - // note: that if the reward set "summing" isn't implemented, this recipient would only have received _6_ slots, - // because each `stack-stx` call only received enough to get 3 slot individually. - let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 7, - "Should have received outputs during PoX reward cycle" - ); - - let burn_blocks = test_observer::get_burn_blocks(); - let mut recipient_slots: HashMap<String, u64> = HashMap::new(); - - for block in burn_blocks.iter() { - let reward_slot_holders = block - .get("reward_slot_holders") - .unwrap() - .as_array() - .unwrap() - .iter() - .map(|x| x.as_str().unwrap().to_string()); - for holder in reward_slot_holders { - if let Some(current) = recipient_slots.get_mut(&holder) { - *current += 1; - } else { - recipient_slots.insert(holder, 1); - } - } - } - - let pox_1_address = BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_node_public_key(&pox_pubkey).to_bytes(), - ) - .unwrap(); - - assert_eq!(recipient_slots.len(), 2); - assert_eq!( - recipient_slots.get(&format!("{pox_2_address}")).cloned(), - Some(7u64) - ); - assert_eq!( - recipient_slots.get(&format!("{pox_1_address}")).cloned(), - Some(7u64) - ); - - // get the canonical chain tip - let tip_info = get_chain_info(&conf); - - eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); - assert_eq!(tip_info.stacks_tip_height, 36); - - // now let's mine into the sunset - while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - // get the canonical chain tip - let tip_info = get_chain_info(&conf); - - eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); - assert_eq!(tip_info.stacks_tip_height, 51); - - let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - - // should receive more rewards during this cycle... 
- eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 14, - "Should have received more outputs during the sunsetting PoX reward cycle" - ); - - // and after sunset - while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); - - // should *not* receive more rewards during the after sunset cycle... - eprintln!("Got UTXOs: {}", utxos.len()); - assert_eq!( - utxos.len(), - 14, - "Should have received no more outputs after sunset PoX reward cycle" - ); - - // should have progressed the chain, though! - // get the canonical chain tip - let tip_info = get_chain_info(&conf); - - eprintln!("Stacks tip is now {}", tip_info.stacks_tip_height); - assert_eq!(tip_info.stacks_tip_height, 66); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[derive(Debug)] -enum Signal { - BootstrapNodeReady, - FollowerNodeReady, - ReplicatingAttachmentsStartTest1, - ReplicatingAttachmentsCheckTest1(u64), - ReplicatingAttachmentsStartTest2, - ReplicatingAttachmentsCheckTest2(u64), -} - -#[test] -#[ignore] -fn atlas_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let user_1 = StacksPrivateKey::random(); - let initial_balance_user_1 = InitialBalance { - address: to_addr(&user_1).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - let bootstrap_node_public_key = { - let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); - let mut pk = keychain.generate_op_signer().get_public_key(); - pk.set_compressed(true); - pk.to_hex() - }; - conf_bootstrap_node - .initial_balances - .push(initial_balance_user_1.clone()); - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - // Prepare the config of the follower node - let (mut conf_follower_node, _) = neon_integration_test_conf(); - let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind - ); - conf_follower_node.node.set_bootstrap_nodes( - bootstrap_node_url, - conf_follower_node.burnchain.chain_id, - conf_follower_node.burnchain.peer_version, - ); - conf_follower_node.node.miner = false; - conf_follower_node - .initial_balances - .push(initial_balance_user_1.clone()); - conf_follower_node - .events_observers - .insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - disable_retries: false, - }); - - conf_follower_node.node.always_use_affirmation_maps = false; - - // Our 2 nodes will share the bitcoind node - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); - let (follower_node_tx, follower_node_rx) = mpsc::channel(); - - let bootstrap_node_thread = thread::spawn(move || { - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = 
format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // Let's setup the follower now. - follower_node_tx - .send(Signal::BootstrapNodeReady) - .expect("Unable to send signal"); - - match bootstrap_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsStartTest1) => { - println!("Follower node is ready..."); - } - _ => panic!("Bootstrap node could nod boot. Aborting test."), - }; - - // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - - // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) - // (stx-to-burn uint)) - let namespace = "passport"; - let salt = "some-salt"; - let salted_namespace = format!("{namespace}{salt}"); - let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); - let tx_1 = make_contract_call( - &user_1, - 0, - 260, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-preorder", - &[ - Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), - Value::UInt(1000000000), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_1.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_1[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // (define-public (namespace-reveal (namespace (buff 20)) - // (namespace-salt (buff 20)) - // (p-func-base uint) - // (p-func-coeff uint) - // (p-func-b1 uint) - // (p-func-b2 uint) - // (p-func-b3 uint) - // (p-func-b4 uint) - // (p-func-b5 uint) - // (p-func-b6 uint) - // (p-func-b7 uint) - // (p-func-b8 uint) - // (p-func-b9 uint) - // (p-func-b10 uint) - // (p-func-b11 uint) - // (p-func-b12 uint) - // (p-func-b13 uint) - // (p-func-b14 uint) - // (p-func-b15 uint) - // (p-func-b16 uint) - // (p-func-non-alpha-discount uint) - // (p-func-no-vowel-discount uint) - // (lifetime uint) - // (namespace-import principal)) - let tx_2 = make_contract_call( - &user_1, - 1, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-reveal", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(salt.as_bytes().to_vec()).unwrap(), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - 
Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1000), - Value::Principal(initial_balance_user_1.address.clone()), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_2.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_2[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // (define-public (name-import (namespace (buff 20)) - // (name (buff 48)) - // (zonefile-hash (buff 20))) - let zonefile_hex = "facade00"; - let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let tx_3 = make_contract_call( - &user_1, - 2, - 500, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-import", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from("johndoe".as_bytes().to_vec()).unwrap(), - Value::Principal(to_addr(&user_1).into()), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_3), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // From there, let's mine these transaction, and build more blocks. - let mut sort_height = channel.get_sortitions_processed(); - let few_blocks = sort_height + 10; - - while sort_height < few_blocks { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - // Then check that the follower is correctly replicating the attachment - follower_node_tx - .send(Signal::ReplicatingAttachmentsCheckTest1(sort_height)) - .expect("Unable to send signal"); - - match bootstrap_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsStartTest2) => { - println!("Follower node is ready..."); - } - _ => panic!("Bootstrap node could nod boot. Aborting test."), - }; - - // From there, let's mine these transaction, and build more blocks. 
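Unlike the raw octet-stream posts used for `tx_1` and `tx_2`, a transaction carrying an Atlas attachment is submitted as JSON pairing the tx hex with the zonefile hex, exactly as the `PostTransactionRequestBody` construction above does. A sketch of that body (the serde derive on the struct is an assumption; its real definition lives elsewhere in the test crate):

    use serde::Serialize;
    use serde_json::json;

    // Assumed shape of the helper struct used above.
    #[derive(Serialize)]
    struct PostTransactionRequestBody {
        tx: String,
        attachment: Option<String>,
    }

    // Body for POST {http_origin}/v2/transactions with Content-Type: application/json.
    fn attachment_body(tx_hex: String, zonefile_hex: &str) -> Vec<u8> {
        let content = PostTransactionRequestBody {
            tx: tx_hex,
            attachment: Some(zonefile_hex.to_string()),
        };
        serde_json::to_vec(&json!(content)).unwrap()
    }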
- let mut sort_height = channel.get_sortitions_processed(); - let few_blocks = sort_height + 10; - - while sort_height < few_blocks { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - // Poll GET v2/attachments/ - for i in 1..10 { - let mut attachments_did_sync = false; - let mut timeout = 60; - while !attachments_did_sync { - let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); - let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); - let res = client - .get(&path) - .header("Content-Type", "application/json") - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let attachment_response: GetAttachmentResponse = res.json().unwrap(); - assert_eq!(attachment_response.attachment.content, zonefile_hex); - attachments_did_sync = true; - } else { - timeout -= 1; - if timeout == 0 { - panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex)); - } - eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); - thread::sleep(Duration::from_millis(1000)); - } - } - } - - // Then check that the follower is correctly replicating the attachment - follower_node_tx - .send(Signal::ReplicatingAttachmentsCheckTest2(sort_height)) - .expect("Unable to send signal"); - - channel.stop_chains_coordinator(); - }); - - // Start the attached observer - test_observer::spawn(); - - // The bootstrap node mined a few blocks and is ready, let's setup this node. - match follower_node_rx.recv() { - Ok(Signal::BootstrapNodeReady) => { - println!("Booting follower node..."); - } - _ => panic!("Bootstrap node could nod boot. Aborting test."), - }; - - let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); - let chain_id = conf_follower_node.burnchain.chain_id; - let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_follower_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // Follower node is ready, the bootstrap node will now handover - bootstrap_node_tx - .send(Signal::ReplicatingAttachmentsStartTest1) - .expect("Unable to send signal"); - - // The bootstrap node published and mined a transaction that includes an attachment. - // Lets observe the attachments replication kicking in. - let target_height = match follower_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsCheckTest1(target_height)) => target_height, - _ => panic!("Bootstrap node could nod boot. 
Aborting test."), - }; - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < target_height { - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - } - - // Now wait for the node to sync the attachment - let mut attachments_did_sync = false; - let mut timeout = 60; - while !attachments_did_sync { - let zonefile_hex = "facade00"; - let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); - let res = client - .get(&path) - .header("Content-Type", "application/json") - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - eprintln!("Success syncing attachment - {}", res.text().unwrap()); - attachments_did_sync = true; - } else { - timeout -= 1; - if timeout == 0 { - panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); - } - eprintln!("Attachment {zonefile_hex} not sync'd yet"); - thread::sleep(Duration::from_millis(1000)); - } - } - - // Test 2: 9 transactions are posted to the follower. - // We want to make sure that the miner is able to - // 1) mine these transactions - // 2) retrieve the attachments staged on the follower node. - // 3) ensure that the follower is also instantiating the attachments after - // executing the transactions, once mined. - let namespace = "passport"; - for i in 1..10 { - let user = StacksPrivateKey::random(); - let zonefile_hex = format!("facade0{i}"); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = format!("johndoe{i}"); - let tx = make_contract_call( - &user_1, - 2 + i, - 500, - chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-import", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(name.as_bytes().to_vec()).unwrap(), - Value::Principal(to_addr(&user).into()), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - bootstrap_node_tx - .send(Signal::ReplicatingAttachmentsStartTest2) - .expect("Unable to send signal"); - - let target_height = match follower_node_rx.recv() { - Ok(Signal::ReplicatingAttachmentsCheckTest2(target_height)) => target_height, - _ => panic!("Bootstrap node could not boot. 
Aborting test."), - }; - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < target_height { - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - } - - // Poll GET v2/attachments/ - for i in 1..10 { - let mut attachments_did_sync = false; - let mut timeout = 60; - while !attachments_did_sync { - let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); - let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); - let res = client - .get(&path) - .header("Content-Type", "application/json") - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let attachment_response: GetAttachmentResponse = res.json().unwrap(); - assert_eq!(attachment_response.attachment.content, zonefile_hex); - attachments_did_sync = true; - } else { - timeout -= 1; - if timeout == 0 { - panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex)); - } - eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); - thread::sleep(Duration::from_millis(1000)); - } - } - } - - // Ensure that we the attached sidecar was able to receive a total of 10 attachments - // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test - // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(!test_observer::get_attachments().is_empty()); - test_observer::clear(); - channel.stop_chains_coordinator(); - - bootstrap_node_thread.join().unwrap(); -} - -#[test] -#[ignore] -fn antientropy_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let user_1 = StacksPrivateKey::random(); - let initial_balance_user_1 = InitialBalance { - address: to_addr(&user_1).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - let bootstrap_node_public_key = { - let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); - let mut pk = keychain.generate_op_signer().get_public_key(); - pk.set_compressed(true); - pk.to_hex() - }; - conf_bootstrap_node - .initial_balances - .push(initial_balance_user_1.clone()); - conf_bootstrap_node.connection_options.antientropy_retry = 10; // move this along -- do anti-entropy protocol once every 10 seconds - conf_bootstrap_node.connection_options.antientropy_public = true; // always push blocks, even if we're not NAT'ed - conf_bootstrap_node.connection_options.max_block_push = 1000; - conf_bootstrap_node.connection_options.max_microblock_push = 1000; - - conf_bootstrap_node.node.mine_microblocks = true; - conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; - conf_bootstrap_node.node.wait_time_for_microblocks = 0; - conf_bootstrap_node.node.microblock_frequency = 0; - conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = 1_000_000; - conf_bootstrap_node.burnchain.max_rbf = 1000000; - conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - // Prepare the config of the follower node - let (mut conf_follower_node, _) = neon_integration_test_conf(); - let bootstrap_node_url = format!( - "{bootstrap_node_public_key}@{}", - 
conf_bootstrap_node.node.p2p_bind - ); - conf_follower_node.connection_options.disable_block_download = true; - conf_follower_node.node.set_bootstrap_nodes( - bootstrap_node_url, - conf_follower_node.burnchain.chain_id, - conf_follower_node.burnchain.peer_version, - ); - conf_follower_node.node.miner = false; - conf_follower_node - .initial_balances - .push(initial_balance_user_1); - conf_follower_node - .events_observers - .insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1000, - disable_retries: false, - }); - - conf_follower_node.node.mine_microblocks = true; - conf_follower_node.miner.microblock_attempt_time_ms = 2_000; - conf_follower_node.node.wait_time_for_microblocks = 0; - conf_follower_node.node.microblock_frequency = 0; - conf_follower_node.miner.first_attempt_time_ms = 1_000_000; - conf_follower_node.miner.subsequent_attempt_time_ms = 1_000_000; - conf_follower_node.burnchain.max_rbf = 1000000; - conf_follower_node.node.wait_time_for_blocks = 1_000; - - conf_follower_node.node.always_use_affirmation_maps = false; - - // Our 2 nodes will share the bitcoind node - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); - let (follower_node_tx, follower_node_rx) = mpsc::channel(); - - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); - - let bootstrap_node_thread = thread::spawn(move || { - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - for i in 0..(target_height - 3) { - eprintln!("Mine block {i}"); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {sort_height}"); - } - - // Let's setup the follower now. - follower_node_tx - .send(Signal::BootstrapNodeReady) - .expect("Unable to send signal"); - - eprintln!("Bootstrap node informed follower that it's ready; waiting for acknowledgement"); - - // wait for bootstrap node to terminate - match bootstrap_node_rx.recv() { - Ok(Signal::FollowerNodeReady) => { - println!("Follower has finished"); - } - Ok(x) => { - panic!("Follower gave a bad signal: {x:?}"); - } - Err(e) => { - panic!("Failed to recv: {e:?}"); - } - }; - - channel.stop_chains_coordinator(); - }); - - // Start the attached observer - test_observer::spawn(); - - // The bootstrap node mined a few blocks and is ready, let's setup this node. 
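The bootstrap and follower threads coordinate through nothing more than the paired `mpsc` channels and the `Signal` enum defined earlier. Stripped of the chain machinery, the handshake reduces to a std-only ping-pong like this sketch:

    use std::sync::mpsc;
    use std::thread;

    #[derive(Debug)]
    enum Signal {
        BootstrapNodeReady,
        FollowerNodeReady,
    }

    fn main() {
        let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel();
        let (follower_node_tx, follower_node_rx) = mpsc::channel();

        let bootstrap_node_thread = thread::spawn(move || {
            // ... bootstrap the chain, mine a few blocks ...
            follower_node_tx.send(Signal::BootstrapNodeReady).expect("send");
            // Block until the follower reports back, then tear down.
            match bootstrap_node_rx.recv() {
                Ok(Signal::FollowerNodeReady) => println!("Follower has finished"),
                other => panic!("unexpected signal: {other:?}"),
            }
        });

        // Follower side: wait for the bootstrap node before booting.
        match follower_node_rx.recv() {
            Ok(Signal::BootstrapNodeReady) => println!("Booting follower node..."),
            other => panic!("unexpected signal: {other:?}"),
        }
        bootstrap_node_tx.send(Signal::FollowerNodeReady).expect("send");
        bootstrap_node_thread.join().unwrap();
    }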
- match follower_node_rx.recv() { - Ok(Signal::BootstrapNodeReady) => { - println!("Booting follower node..."); - } - _ => panic!("Bootstrap node could not boot. Aborting test."), - }; - - let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); - let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_follower_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - - let thread_burnchain_config = burnchain_config.clone(); - thread::spawn(move || run_loop.start(Some(thread_burnchain_config), 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - let mut sort_height = channel.get_sortitions_processed(); - while sort_height < (target_height + 200) as u64 { - eprintln!( - "Follower sortition is {sort_height}, target is {}", - target_height + 200 - ); - wait_for_runloop(&blocks_processed); - sort_height = channel.get_sortitions_processed(); - sleep_ms(1000); - } - - eprintln!("Follower booted up; waiting for blocks"); - - // wait for block height to reach target - let mut tip_height = get_chain_tip_height(&http_origin); - eprintln!( - "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" - ); - - let btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_follower_node.clone(), - None, - Some(burnchain_config), - None, - ); - - let mut burnchain_deadline = get_epoch_time_secs() + 60; - while tip_height < (target_height - 3) as u64 { - sleep_ms(1000); - tip_height = get_chain_tip_height(&http_origin); - - eprintln!("Follower Stacks tip height is {tip_height}"); - - if burnchain_deadline < get_epoch_time_secs() { - burnchain_deadline = get_epoch_time_secs() + 60; - btc_regtest_controller.build_next_block(1); - } - } - - bootstrap_node_tx - .send(Signal::FollowerNodeReady) - .expect("Unable to send signal"); - bootstrap_node_thread.join().unwrap(); - - eprintln!("Follower node finished"); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[allow(clippy::too_many_arguments)] -fn wait_for_mined( - btc_regtest_controller: &mut BitcoinRegtestController, - blocks_processed: &Arc, - http_origin: &str, - users: &[StacksPrivateKey], - account_before_nonces: &[u64], - batch_size: usize, - batches: usize, - index_block_hashes: &mut Vec, -) { - let mut all_mined_vec = vec![false; batches * batch_size]; - let mut account_after_nonces = vec![0; batches * batch_size]; - let mut all_mined = false; - for _k in 0..10 { - next_block_and_wait(btc_regtest_controller, blocks_processed); - sleep_ms(10_000); - - let (ch, bhh) = get_chain_tip(http_origin); - let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - - if let Some(last_ibh) = index_block_hashes.last() { - if *last_ibh != ibh { - index_block_hashes.push(ibh); - eprintln!("Tip is now {ibh}"); - } - } - - for j in 0..batches * batch_size { - let account_after = get_account(http_origin, &to_addr(&users[j])); - let account_after_nonce = account_after.nonce; - account_after_nonces[j] = account_after_nonce; - - if account_before_nonces[j] < account_after_nonce { - all_mined_vec[j] = true; - } - } - - all_mined = all_mined_vec.iter().all(|elem| *elem); - if all_mined { - break; - } - } - if !all_mined { - panic!( - "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + 
{batch_size}" - ); - } -} - -#[test] -#[ignore] -fn atlas_stress_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let mut initial_balances = vec![]; - let mut users = vec![]; - - let batches = 5; - let batch_size = 20; - - for _i in 0..(2 * batches * batch_size + 1) { - let user = StacksPrivateKey::random(); - let initial_balance_user = InitialBalance { - address: to_addr(&user).into(), - amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), - }; - users.push(user); - initial_balances.push(initial_balance_user); - } - - // Prepare the config of the bootstrap node - let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); - conf_bootstrap_node - .initial_balances - .append(&mut initial_balances.clone()); - - conf_bootstrap_node.miner.first_attempt_time_ms = u64::MAX; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::MAX; - - conf_bootstrap_node.node.mine_microblocks = true; - conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; - conf_bootstrap_node.node.wait_time_for_microblocks = 0; - conf_bootstrap_node.node.microblock_frequency = 0; - conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = 2_000_000; - conf_bootstrap_node.burnchain.max_rbf = 1000000; - conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - - conf_bootstrap_node.node.always_use_affirmation_maps = false; - - let user_1 = users.pop().unwrap(); - let initial_balance_user_1 = initial_balances.pop().unwrap(); - - // Start the attached observer - test_observer::spawn(); - - let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf_bootstrap_node.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let client = reqwest::blocking::Client::new(); - - thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut index_block_hashes = vec![]; - - // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool - - // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) - // (stx-to-burn uint)) - let namespace = "passport"; - let salt = "some-salt"; - let salted_namespace = format!("{namespace}{salt}"); - let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); - let tx_1 = make_contract_call( - &user_1, - 0, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-preorder", - &[ - Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), - Value::UInt(1000000000), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_1.clone()) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_1[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - // (define-public (namespace-reveal (namespace (buff 20)) - // (namespace-salt (buff 20)) - // (p-func-base uint) - // (p-func-coeff uint) - // (p-func-b1 uint) - // (p-func-b2 uint) - // (p-func-b3 uint) - // (p-func-b4 uint) - // (p-func-b5 uint) - // (p-func-b6 uint) - // (p-func-b7 uint) - // (p-func-b8 uint) - // (p-func-b9 uint) - // (p-func-b10 uint) - // (p-func-b11 uint) - // (p-func-b12 uint) - // (p-func-b13 uint) - // (p-func-b14 uint) - // (p-func-b15 uint) - // (p-func-b16 uint) - // (p-func-non-alpha-discount uint) - // (p-func-no-vowel-discount uint) - // (lifetime uint) - // (namespace-import principal)) - let tx_2 = make_contract_call( - &user_1, - 1, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-reveal", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(salt.as_bytes().to_vec()).unwrap(), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1), - Value::UInt(1000), - Value::Principal(initial_balance_user_1.address), - ], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_2.clone()) - .send() - .unwrap(); - eprintln!("{:#?}", res); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx_2[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - let mut mined_namespace_reveal = false; - 
for _j in 0..10 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - - let account_after = get_account(&http_origin, &to_addr(&user_1)); - if account_after.nonce == 2 { - mined_namespace_reveal = true; - break; - } - } - assert!( - mined_namespace_reveal, - "Did not mine namespace preorder or reveal" - ); - - // make a _ton_ of name-imports - for i in 0..batches { - let account_before = get_account(&http_origin, &to_addr(&user_1)); - - for j in 0..batch_size { - // (define-public (name-import (namespace (buff 20)) - // (name (buff 48)) - // (zonefile-hash (buff 20))) - let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); - let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - - let tx_3 = make_contract_call( - &user_1, - 2 + (batch_size * i + j) as u64, - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "name-import", - &[ - Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), - Value::buff_from(format!("johndoe{}", i * batch_size + j).as_bytes().to_vec()) - .unwrap(), - Value::Principal(to_addr(&users[i * batch_size + j]).into()), - Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), - ], - ); - - let body = { - let content = PostTransactionRequestBody { - tx: bytes_to_hex(&tx_3), - attachment: Some(zonefile_hex.to_string()), - }; - serde_json::to_vec(&json!(content)).unwrap() - }; - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/json") - .body(body) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - } - - // wait for them all to be mined - let mut all_mined = false; - let account_after_nonce = 0; - for _j in 0..10 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(10_000); - - let (ch, bhh) = get_chain_tip(&http_origin); - let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - index_block_hashes.push(ibh); - - let account_after = get_account(&http_origin, &to_addr(&user_1)); - let account_after_nonce = account_after.nonce; - if account_before.nonce + (batch_size as u64) <= account_after_nonce { - all_mined = true; - break; - } + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); } - assert!( - all_mined, - "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", - account_before.nonce + (batch_size as u64) - ); - } - - // launch namespace - // (define-public (namespace-ready (namespace (buff 20))) - let namespace = "passport"; - let tx_4 = make_contract_call( - &user_1, - 2 + (batch_size as u64) * (batches as u64), - 1000, - conf_bootstrap_node.burnchain.chain_id, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "bns", - "namespace-ready", - &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], - ); - - let path = format!("{http_origin}/v2/transactions"); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx_4) - .send() - .unwrap(); - eprintln!("{res:#?}"); - if !res.status().is_success() { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } - - let 
mut mined_namespace_ready = false;
- for _j in 0..10 {
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- sleep_ms(10_000);
- let (ch, bhh) = get_chain_tip(&http_origin);
- let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh);
- index_block_hashes.push(ibh);
+ // From there, let's mine these transactions, and build more blocks.
+ let mut sort_height = channel.get_sortitions_processed();
+ let few_blocks = sort_height + 10;
- let account_after = get_account(&http_origin, &to_addr(&user_1));
- if account_after.nonce == 2 + (batch_size as u64) * (batches as u64) {
- mined_namespace_ready = true;
- break;
+ while sort_height < few_blocks {
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ sort_height = channel.get_sortitions_processed();
+ eprintln!("Sort height: {sort_height}");
}
- }
- assert!(mined_namespace_ready, "Did not mine namespace ready");
-
- // make a _ton_ of preorders
- {
- let mut account_before_nonces = vec![0; batches * batch_size];
- for j in 0..batches * batch_size {
- let account_before =
- get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
- account_before_nonces[j] = account_before.nonce;
-
- let fqn = format!("janedoe{j}.passport");
- let fqn_bytes = fqn.as_bytes().to_vec();
- let salt = format!("{:04x}", j);
- let salt_bytes = salt.as_bytes().to_vec();
- let mut hash_data = fqn_bytes.clone();
- hash_data.append(&mut salt_bytes.clone());
-
- let salted_hash = Hash160::from_data(&hash_data);
-
- let tx_5 = make_contract_call(
- &users[batches * batch_size + j],
- 0,
- 1000,
- conf_bootstrap_node.burnchain.chain_id,
- &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
- "bns",
- "name-preorder",
- &[
- Value::buff_from(salted_hash.0.to_vec()).unwrap(),
- Value::UInt(500),
- ],
- );
- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/octet-stream")
- .body(tx_5.clone())
- .send()
- .unwrap();
+ // Then check that the follower is correctly replicating the attachment
+ follower_node_tx
+ .send(Signal::ReplicatingAttachmentsCheckTest1(sort_height))
+ .expect("Unable to send signal");
- eprintln!(
- "sent preorder for {}:\n{res:#?}",
- &to_addr(&users[batches * batch_size + j])
- );
- if !res.status().is_success() {
- panic!("");
+ match bootstrap_node_rx.recv() {
+ Ok(Signal::ReplicatingAttachmentsStartTest2) => {
+ println!("Follower node is ready...");
}
- }
-
- wait_for_mined(
- &mut btc_regtest_controller,
- &blocks_processed,
- &http_origin,
- &users[batches * batch_size..],
- &account_before_nonces,
- batch_size,
- batches,
- &mut index_block_hashes,
- );
- }
-
- // make a _ton_ of registers
- {
- let mut account_before_nonces = vec![0; batches * batch_size];
- for j in 0..batches * batch_size {
- let account_before =
- get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
- account_before_nonces[j] = account_before.nonce;
-
- let name = format!("janedoe{j}");
- let salt = format!("{j:04x}");
-
- let zonefile_hex = format!("facade01{j:04x}");
- let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
-
- let tx_6 = make_contract_call(
- &users[batches * batch_size + j],
- 1,
- 1000,
- conf_bootstrap_node.burnchain.chain_id,
- &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
- "bns",
- "name-register",
- &[
- Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
- Value::buff_from(name.as_bytes().to_vec()).unwrap(),
-
Value::buff_from(salt.as_bytes().to_vec()).unwrap(),
- Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(),
- ],
- );
+ _ => panic!("Bootstrap node could not boot. Aborting test."),
+ };

- let body = {
- let content = PostTransactionRequestBody {
- tx: bytes_to_hex(&tx_6),
- attachment: Some(zonefile_hex.to_string()),
- };
- serde_json::to_vec(&json!(content)).unwrap()
- };
+ // From there, let's mine these transactions, and build more blocks.
+ let mut sort_height = channel.get_sortitions_processed();
+ let few_blocks = sort_height + 10;

- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/json")
- .body(body)
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if !res.status().is_success() {
- eprintln!("{}", res.text().unwrap());
- panic!("");
- }
+ while sort_height < few_blocks {
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ sort_height = channel.get_sortitions_processed();
+ eprintln!("Sort height: {sort_height}");
}

- wait_for_mined(
- &mut btc_regtest_controller,
- &blocks_processed,
- &http_origin,
- &users[batches * batch_size..],
- &account_before_nonces,
- batch_size,
- batches,
- &mut index_block_hashes,
- );
- }
-
- // make a _ton_ of updates
- {
- let mut account_before_nonces = vec![0; batches * batch_size];
- for j in 0..batches * batch_size {
- let account_before =
- get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
- account_before_nonces[j] = account_before.nonce;
+ // Poll GET v2/attachments/
+ for i in 1..10 {
+ let mut attachments_did_sync = false;
+ let mut timeout = 60;
+ while !attachments_did_sync {
+ let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap();
+ let hashed_zonefile = Hash160::from_data(&zonefile_hex);
+ let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex());
+ let res = client
+ .get(&path)
+ .header("Content-Type", "application/json")
+ .send()
+ .unwrap();
+ eprintln!("{res:#?}");
+ if res.status().is_success() {
+ let attachment_response: GetAttachmentResponse = res.json().unwrap();
+ assert_eq!(attachment_response.attachment.content, zonefile_hex);
+ attachments_did_sync = true;
+ } else {
+ timeout -= 1;
+ if timeout == 0 {
+ panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex));
+ }
+ eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex));
+ thread::sleep(Duration::from_millis(1000));
+ }
+ }
+ }

- let name = format!("janedoe{j}");
- let zonefile_hex = format!("facade02{j:04x}");
- let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
+ // Then check that the follower is correctly replicating the attachment
+ follower_node_tx
+ .send(Signal::ReplicatingAttachmentsCheckTest2(sort_height))
+ .expect("Unable to send signal");

- let tx_7 = make_contract_call(
- &users[batches * batch_size + j],
- 2,
- 1000,
- conf_bootstrap_node.burnchain.chain_id,
- &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
- "bns",
- "name-update",
- &[
- Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
- Value::buff_from(name.as_bytes().to_vec()).unwrap(),
- Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(),
- ],
- );
+ channel.stop_chains_coordinator();
+ });

- let body = {
- let content = PostTransactionRequestBody {
- tx: bytes_to_hex(&tx_7),
- attachment: Some(zonefile_hex.to_string()),
- };
- serde_json::to_vec(&json!(content)).unwrap()
- };

+ //
Start the attached observer
+ test_observer::spawn();

- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/json")
- .body(body)
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if !res.status().is_success() {
- eprintln!("{}", res.text().unwrap());
- panic!("");
- }
}
+ // The bootstrap node mined a few blocks and is ready, let's setup this node.
+ match follower_node_rx.recv() {
+ Ok(Signal::BootstrapNodeReady) => {
+ println!("Booting follower node...");
}
+ _ => panic!("Bootstrap node could not boot. Aborting test."),
+ };

- wait_for_mined(
- &mut btc_regtest_controller,
- &blocks_processed,
- &http_origin,
- &users[batches * batch_size..],
- &account_before_nonces,
- batch_size,
- batches,
- &mut index_block_hashes,
- );
- }
+ let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path());
+ let chain_id = conf_follower_node.burnchain.chain_id;
+ let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind);

- // make a _ton_ of renewals
- {
- let mut account_before_nonces = vec![0; batches * batch_size];
- for j in 0..batches * batch_size {
- let account_before =
- get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
- account_before_nonces[j] = account_before.nonce;
+ eprintln!("Chain bootstrapped...");

- let name = format!("janedoe{j}");
- let zonefile_hex = format!("facade03{j:04x}");
- let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
+ let mut run_loop = neon::RunLoop::new(conf_follower_node.clone());
+ let blocks_processed = run_loop.get_blocks_processed_arc();
+ let client = reqwest::blocking::Client::new();
+ let channel = run_loop.get_coordinator_channel().unwrap();

- let tx_8 = make_contract_call(
- &users[batches * batch_size + j],
- 3,
- 1000,
- conf_bootstrap_node.burnchain.chain_id,
- &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
- "bns",
- "name-renewal",
- &[
- Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
- Value::buff_from(name.as_bytes().to_vec()).unwrap(),
- Value::UInt(500),
- Value::none(),
- Value::some(Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap())
- .unwrap(),
- ],
- );
+ thread::spawn(move || run_loop.start(Some(burnchain_config), 0));

- let body = {
- let content = PostTransactionRequestBody {
- tx: bytes_to_hex(&tx_8),
- attachment: Some(zonefile_hex.to_string()),
- };
- serde_json::to_vec(&json!(content)).unwrap()
- };
+ // give the run loop some time to start up!
+ wait_for_runloop(&blocks_processed);

- let path = format!("{http_origin}/v2/transactions");
- let res = client
- .post(&path)
- .header("Content-Type", "application/json")
- .body(body)
- .send()
- .unwrap();
- eprintln!("{res:#?}");
- if !res.status().is_success() {
- eprintln!("{}", res.text().unwrap());
- panic!("");
- }
- }
+ // Follower node is ready, the bootstrap node will now hand over
+ bootstrap_node_tx
+ .send(Signal::ReplicatingAttachmentsStartTest1)
+ .expect("Unable to send signal");

- wait_for_mined(
- &mut btc_regtest_controller,
- &blocks_processed,
- &http_origin,
- &users[batches * batch_size..],
- &account_before_nonces,
- batch_size,
- batches,
- &mut index_block_hashes,
- );
+ // The bootstrap node published and mined a transaction that includes an attachment.
+ // Let's observe the attachments replication kicking in.
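Both directions of the attachment round-trip lean on two small JSON shapes defined outside this hunk: the `PostTransactionRequestBody` posted to `/v2/transactions`, and the `GetAttachmentResponse` read back from `/v2/attachments/{hash}`. Inferring only from the field accesses in these tests, they presumably look roughly like this (a sketch, not the canonical definitions):

use serde::{Deserialize, Serialize};

// Request body: the hex-encoded serialized transaction plus an optional
// hex-encoded zonefile riding along as an attachment.
#[derive(Serialize)]
struct PostTransactionRequestBody {
    tx: String,
    attachment: Option<String>,
}

// Response: only the field these tests assert on is sketched here; `content`
// is compared against the raw zonefile bytes.
#[derive(Deserialize)]
struct GetAttachmentResponse {
    attachment: Attachment,
}

#[derive(Deserialize)]
struct Attachment {
    content: Vec<u8>,
}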
+ let target_height = match follower_node_rx.recv() {
+ Ok(Signal::ReplicatingAttachmentsCheckTest1(target_height)) => target_height,
+ _ => panic!("Bootstrap node could not boot. Aborting test."),
+ };
+
+ let mut sort_height = channel.get_sortitions_processed();
+ while sort_height < target_height {
+ wait_for_runloop(&blocks_processed);
+ sort_height = channel.get_sortitions_processed();
}

- // find all attachment indexes and make sure we can get them
- let mut attachment_indexes = HashMap::new();
- let mut attachment_hashes = HashMap::new();
- {
- let atlasdb_path = conf_bootstrap_node.get_atlas_db_file_path();
- let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, false).unwrap();
- for ibh in index_block_hashes.iter() {
- let indexes = query_rows::(
- &atlasdb.conn,
- "SELECT attachment_index FROM attachment_instances WHERE index_block_hash = ?1",
- &[ibh],
- )
+ // Now wait for the node to sync the attachment
+ let mut attachments_did_sync = false;
+ let mut timeout = 60;
+ while !attachments_did_sync {
+ let zonefile_hex = "facade00";
+ let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap());
+ let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex());
+ let res = client
+ .get(&path)
+ .header("Content-Type", "application/json")
+ .send()
.unwrap();
- if !indexes.is_empty() {
- attachment_indexes.insert(*ibh, indexes.clone());
- }
-
- for index in indexes.iter() {
- let mut hashes = query_row_columns::(
- &atlasdb.conn,
- "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2",
- params![ibh, u64_to_sql(*index).unwrap()],
- "content_hash")
- .unwrap();
- if !hashes.is_empty() {
- assert_eq!(hashes.len(), 1);
- attachment_hashes.insert((*ibh, *index), hashes.pop());
- }
+ eprintln!("{res:#?}");
+ if res.status().is_success() {
+ eprintln!("Success syncing attachment - {}", res.text().unwrap());
+ attachments_did_sync = true;
+ } else {
+ timeout -= 1;
+ if timeout == 0 {
+ panic!("Failed syncing 1 attachment between 2 neon runloops within 60s - Something is wrong");
}
+ eprintln!("Attachment {zonefile_hex} not sync'd yet");
+ thread::sleep(Duration::from_millis(1000));
}
}
- }
- eprintln!("attachment_indexes = {attachment_indexes:?}");
-
- let max_request_time_ms = 100;
-
- for (ibh, attachments) in attachment_indexes.iter() {
- let l = attachments.len();
- for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) {
- if i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST >= l {
- break;
- }
-
- let attachments_batch = attachments[i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST
- ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)]
- .to_vec();
- let path = format!(
- "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}",
- attachments_batch
- .iter()
- .map(|a| format!("{a}"))
- .collect::>()
- .join(",")
- );
- let attempts = 10;
- let ts_begin = get_epoch_time_ms();
- for _ in 0..attempts {
- let res = client.get(&path).send().unwrap();
- assert!(
- res.status().is_success(),
- "Bad response for `{path}`: `{:?}`",
- res.text().unwrap()
- );
- let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap();
- eprintln!("attachment inv response for {path}: {attachment_inv_response:?}");
- }
- let ts_end = get_epoch_time_ms();
- let total_time = ts_end.saturating_sub(ts_begin);
- eprintln!("Requested {path} {attempts} times in {total_time}ms");
+ // Test 2: 9 transactions are posted to the follower.
+ // We want to make sure that the miner is able to + // 1) mine these transactions + // 2) retrieve the attachments staged on the follower node. + // 3) ensure that the follower is also instantiating the attachments after + // executing the transactions, once mined. + let namespace = "passport"; + for i in 1..10 { + let user = StacksPrivateKey::random(); + let zonefile_hex = format!("facade0{i}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); + let name = format!("johndoe{i}"); + let tx = make_contract_call( + &user_1, + 2 + i, + 500, + chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-import", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::Principal(to_addr(&user).into()), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - // requests should take no more than max_request_time_ms - assert!( - total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" - ); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; + + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); } + } - for attachment in attachments.iter().take(l) { - if *attachment == 0 { - continue; - } - let content_hash = attachment_hashes - .get(&(*ibh, *attachment)) - .cloned() - .unwrap() - .unwrap(); + bootstrap_node_tx + .send(Signal::ReplicatingAttachmentsStartTest2) + .expect("Unable to send signal"); - let path = format!("{http_origin}/v2/attachments/{content_hash}"); + let target_height = match follower_node_rx.recv() { + Ok(Signal::ReplicatingAttachmentsCheckTest2(target_height)) => target_height, + _ => panic!("Bootstrap node could not boot. 
Aborting test."), + }; - let attempts = 10; - let ts_begin = get_epoch_time_ms(); - for _ in 0..attempts { - let res = client.get(&path).send().unwrap(); - assert!( - res.status().is_success(), - "Bad response for `{path}`: `{:?}`", - res.text().unwrap() - ); + let mut sort_height = channel.get_sortitions_processed(); + while sort_height < target_height { + wait_for_runloop(&blocks_processed); + sort_height = channel.get_sortitions_processed(); + } + + // Poll GET v2/attachments/ + for i in 1..10 { + let mut attachments_did_sync = false; + let mut timeout = 60; + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); + let hashed_zonefile = Hash160::from_data(&zonefile_hex); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); + let res = client + .get(&path) + .header("Content-Type", "application/json") + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!("attachment response for {path}: {attachment_response:?}"); + assert_eq!(attachment_response.attachment.content, zonefile_hex); + attachments_did_sync = true; + } else { + timeout -= 1; + if timeout == 0 { + panic!("Failed syncing 9 attachments between 2 neon runloops within 60s (failed at {}) - Something is wrong", &to_hex(&zonefile_hex)); + } + eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); + thread::sleep(Duration::from_millis(1000)); } - let ts_end = get_epoch_time_ms(); - let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {path} {attempts} times in {total_time}ms"); - - // requests should take no more than max_request_time_ms - assert!( - total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" - ); } } + // Ensure that we the attached sidecar was able to receive a total of 10 attachments + // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test + // We're using an inequality as a best effort, to make sure that **some** attachments were received. + assert!(!test_observer::get_attachments().is_empty()); test_observer::clear(); + channel.stop_chains_coordinator(); + + bootstrap_node_thread.join().unwrap(); } -/// Run a fixed contract 20 times. Linearly increase the amount paid each time. The cost of the -/// contract should stay the same, and the fee rate paid should monotonically grow. The value -/// should grow faster for lower values of `window_size`, because a bigger window slows down the -/// growth. 
-fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value: f64) { +#[test] +#[ignore] +fn antientropy_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let max_contract_src = r#" -;; define counter variable -(define-data-var counter int 0) + let user_1 = StacksPrivateKey::random(); + let initial_balance_user_1 = InitialBalance { + address: to_addr(&user_1).into(), + amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), + }; -;; increment method -(define-public (increment) - (begin - (var-set counter (+ (var-get counter) 1)) - (ok (var-get counter)))) + // Prepare the config of the bootstrap node + let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); + let bootstrap_node_public_key = { + let keychain = Keychain::default(conf_bootstrap_node.node.seed.clone()); + let mut pk = keychain.generate_op_signer().get_public_key(); + pk.set_compressed(true); + pk.to_hex() + }; + conf_bootstrap_node + .initial_balances + .push(initial_balance_user_1.clone()); + conf_bootstrap_node.connection_options.antientropy_retry = 10; // move this along -- do anti-entropy protocol once every 10 seconds + conf_bootstrap_node.connection_options.antientropy_public = true; // always push blocks, even if we're not NAT'ed + conf_bootstrap_node.connection_options.max_block_push = 1000; + conf_bootstrap_node.connection_options.max_microblock_push = 1000; - (define-public (increment-many) - (begin - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (unwrap! (increment) (err u1)) - (ok (var-get counter)))) - "#; + conf_bootstrap_node.node.mine_microblocks = true; + conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; + conf_bootstrap_node.node.wait_time_for_microblocks = 0; + conf_bootstrap_node.node.microblock_frequency = 0; + conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; + conf_bootstrap_node.miner.subsequent_attempt_time_ms = 1_000_000; + conf_bootstrap_node.burnchain.max_rbf = 1000000; + conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - let spender_sk = StacksPrivateKey::random(); - let spender_addr = to_addr(&spender_sk); + conf_bootstrap_node.node.always_use_affirmation_maps = false; - let (mut conf, _) = neon_integration_test_conf(); + // Prepare the config of the follower node + let (mut conf_follower_node, _) = neon_integration_test_conf(); + let bootstrap_node_url = format!( + "{bootstrap_node_public_key}@{}", + conf_bootstrap_node.node.p2p_bind + ); + conf_follower_node.connection_options.disable_block_download = true; + conf_follower_node.node.set_bootstrap_nodes( + bootstrap_node_url, + conf_follower_node.burnchain.chain_id, + conf_follower_node.burnchain.peer_version, + ); + conf_follower_node.node.miner = false; + conf_follower_node + .initial_balances + .push(initial_balance_user_1); + conf_follower_node + .events_observers + .insert(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1000, + disable_retries: false, + }); - // Set this estimator as special. - conf.estimation.fee_estimator = Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate); - // Use randomness of 0 to keep test constant. Randomness is tested in unit tests. 
- conf.estimation.fee_rate_fuzzer_fraction = 0f64; - conf.estimation.fee_rate_window_size = window_size; + conf_follower_node.node.mine_microblocks = true; + conf_follower_node.miner.microblock_attempt_time_ms = 2_000; + conf_follower_node.node.wait_time_for_microblocks = 0; + conf_follower_node.node.microblock_frequency = 0; + conf_follower_node.miner.first_attempt_time_ms = 1_000_000; + conf_follower_node.miner.subsequent_attempt_time_ms = 1_000_000; + conf_follower_node.burnchain.max_rbf = 1000000; + conf_follower_node.node.wait_time_for_blocks = 1_000; + + conf_follower_node.node.always_use_affirmation_maps = false; + + // Our 2 nodes will share the bitcoind node + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let (bootstrap_node_tx, bootstrap_node_rx) = mpsc::channel(); + let (follower_node_tx, follower_node_rx) = mpsc::channel(); + + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); + let target_height = 3 + (3 * burnchain_config.pox_constants.reward_cycle_length); + + let bootstrap_node_thread = thread::spawn(move || { + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + for i in 0..(target_height - 3) { + eprintln!("Mine block {i}"); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let sort_height = channel.get_sortitions_processed(); + eprintln!("Sort height: {sort_height}"); + } + + // Let's setup the follower now. + follower_node_tx + .send(Signal::BootstrapNodeReady) + .expect("Unable to send signal"); - conf.initial_balances.push(InitialBalance { - address: spender_addr.into(), - amount: 10000000000, + eprintln!("Bootstrap node informed follower that it's ready; waiting for acknowledgement"); + + // wait for bootstrap node to terminate + match bootstrap_node_rx.recv() { + Ok(Signal::FollowerNodeReady) => { + println!("Follower has finished"); + } + Ok(x) => { + panic!("Follower gave a bad signal: {x:?}"); + } + Err(e) => { + panic!("Failed to recv: {e:?}"); + } + }; + + channel.stop_chains_coordinator(); }); - test_observer::spawn(); - test_observer::register_any(&mut conf); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + // Start the attached observer + test_observer::spawn(); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + // The bootstrap node mined a few blocks and is ready, let's setup this node. 
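The `recv()` just below is the follower's half of the rendezvous set up earlier: each thread owns the receiving end of one channel and blocks until its peer reaches the matching point. A minimal, self-contained model of that pattern (names here are illustrative, not from the test file):

use std::sync::mpsc;
use std::thread;

fn main() {
    let (to_follower, from_bootstrap) = mpsc::channel();
    let (to_bootstrap, from_follower) = mpsc::channel();

    let bootstrap = thread::spawn(move || {
        // ... mine some blocks ...
        to_follower.send("bootstrap ready").unwrap();
        // Block until the follower has finished its checks.
        assert_eq!(from_follower.recv().unwrap(), "follower done");
    });

    assert_eq!(from_bootstrap.recv().unwrap(), "bootstrap ready");
    // ... boot the follower, sync, run assertions ...
    to_bootstrap.send("follower done").unwrap();
    bootstrap.join().unwrap();
}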
+ match follower_node_rx.recv() { + Ok(Signal::BootstrapNodeReady) => { + println!("Booting follower node..."); + } + _ => panic!("Bootstrap node could not boot. Aborting test."), + }; - btc_regtest_controller.bootstrap_chain(200); + let burnchain_config = Burnchain::regtest(&conf_follower_node.get_burn_db_path()); + let http_origin = format!("http://{}", &conf_follower_node.node.rpc_bind); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf_follower_node.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); - thread::spawn(move || run_loop.start(None, 0)); + let thread_burnchain_config = burnchain_config.clone(); + thread::spawn(move || run_loop.start(Some(thread_burnchain_config), 0)); + // give the run loop some time to start up! wait_for_runloop(&blocks_processed); - run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 210, &conf); - submit_tx( - &http_origin, - &make_contract_publish( - &spender_sk, - 0, - 110000, - conf.burnchain.chain_id, - "increment-contract", - max_contract_src, - ), - ); - run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); - - // Loop 20 times. Each time, execute the same transaction, but increase the amount *paid*. - // This will exercise the window size. - let mut response_estimated_costs = vec![]; - let mut response_top_fee_rates = vec![]; - for i in 1..21 { - submit_tx( - &http_origin, - &make_contract_call( - &spender_sk, - i, // nonce - i * 100000, // payment - conf.burnchain.chain_id, - &spender_addr, - "increment-contract", - "increment-many", - &[], - ), - ); - run_until_burnchain_height( - &mut btc_regtest_controller, - &blocks_processed, - 212 + 2 * i, - &conf, + let mut sort_height = channel.get_sortitions_processed(); + while sort_height < (target_height + 200) as u64 { + eprintln!( + "Follower sortition is {sort_height}, target is {}", + target_height + 200 ); + wait_for_runloop(&blocks_processed); + sort_height = channel.get_sortitions_processed(); + sleep_ms(1000); + } - { - // Read from the fee estimation endpoin. 
- let path = format!("{http_origin}/v2/fees/transaction");
+ eprintln!("Follower booted up; waiting for blocks");

- let tx_payload = TransactionPayload::ContractCall(TransactionContractCall {
- address: spender_addr,
- contract_name: ContractName::from("increment-contract"),
- function_name: ClarityName::from("increment-many"),
- function_args: vec![],
- });
+ // wait for block height to reach target
+ let mut tip_height = get_chain_tip_height(&http_origin);
+ eprintln!(
+ "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3"
+ );

- let payload_data = tx_payload.serialize_to_vec();
- let payload_hex = format!("0x{}", to_hex(&payload_data));
+ let btc_regtest_controller = BitcoinRegtestController::with_burnchain(
+ conf_follower_node.clone(),
+ None,
+ Some(burnchain_config),
+ None,
+ );

- let body = json!({ "transaction_payload": payload_hex.clone() });
+ let mut burnchain_deadline = get_epoch_time_secs() + 60;
+ while tip_height < (target_height - 3) as u64 {
+ sleep_ms(1000);
+ tip_height = get_chain_tip_height(&http_origin);

- let client = reqwest::blocking::Client::new();
- let fee_rate_result = client
- .post(&path)
- .json(&body)
- .send()
- .expect("Should be able to post")
- .json::()
- .expect("Failed to parse result into JSON");
+ eprintln!("Follower Stacks tip height is {tip_height}");

- response_estimated_costs.push(fee_rate_result.estimated_cost_scalar);
- response_top_fee_rates.push(fee_rate_result.estimations.last().unwrap().fee_rate);
+ if burnchain_deadline < get_epoch_time_secs() {
+ burnchain_deadline = get_epoch_time_secs() + 60;
+ btc_regtest_controller.build_next_block(1);
}
}

- // Wait two extra blocks to be sure.
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ bootstrap_node_tx
+ .send(Signal::FollowerNodeReady)
+ .expect("Unable to send signal");
+ bootstrap_node_thread.join().unwrap();

- assert_eq!(response_estimated_costs.len(), response_top_fee_rates.len());
+ eprintln!("Follower node finished");

- // Check that:
- // 1) The cost is always the same.
- // 2) Fee rate grows monotonically.
- for i in 1..response_estimated_costs.len() {
- let curr_cost = response_estimated_costs[i];
- let last_cost = response_estimated_costs[i - 1];
- assert_eq!(curr_cost, last_cost);
+ test_observer::clear();
+ channel.stop_chains_coordinator();
+}

- let curr_rate = response_top_fee_rates[i];
- let last_rate = response_top_fee_rates[i - 1];
- assert!(curr_rate >= last_rate);
- }
+#[allow(clippy::too_many_arguments)]
+fn wait_for_mined(
+ btc_regtest_controller: &mut BitcoinRegtestController,
+ blocks_processed: &Arc<AtomicU64>,
+ http_origin: &str,
+ users: &[StacksPrivateKey],
+ account_before_nonces: &[u64],
+ batch_size: usize,
+ batches: usize,
+ index_block_hashes: &mut Vec<StacksBlockId>,
+) {
+ let mut all_mined_vec = vec![false; batches * batch_size];
+ let mut account_after_nonces = vec![0; batches * batch_size];
+ let mut all_mined = false;
+ for _k in 0..10 {
+ next_block_and_wait(btc_regtest_controller, blocks_processed);
+ sleep_ms(10_000);

- // Check the final value is near input parameter.
- assert!(is_close_f64( - *response_top_fee_rates.last().unwrap(), - expected_final_value - )); + let (ch, bhh) = get_chain_tip(http_origin); + let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - channel.stop_chains_coordinator(); -} + if let Some(last_ibh) = index_block_hashes.last() { + if *last_ibh != ibh { + index_block_hashes.push(ibh); + eprintln!("Tip is now {ibh}"); + } + } -/// Test the FuzzedWeightedMedianFeeRate with window size 5 and randomness 0. We increase the -/// amount paid linearly each time. This estimate should grow *faster* than with window size 10. -#[test] -#[ignore] -fn fuzzed_median_fee_rate_estimation_test_window5() { - fuzzed_median_fee_rate_estimation_test(5, 202680.0992) -} + for j in 0..batches * batch_size { + let account_after = get_account(http_origin, &to_addr(&users[j])); + let account_after_nonce = account_after.nonce; + account_after_nonces[j] = account_after_nonce; -/// Test the FuzzedWeightedMedianFeeRate with window size 10 and randomness 0. We increase the -/// amount paid linearly each time. This estimate should grow *slower* than with window size 5. -#[test] -#[ignore] -fn fuzzed_median_fee_rate_estimation_test_window10() { - fuzzed_median_fee_rate_estimation_test(10, 90080.5496) + if account_before_nonces[j] < account_after_nonce { + all_mined_vec[j] = true; + } + } + + all_mined = all_mined_vec.iter().all(|elem| *elem); + if all_mined { + break; + } + } + if !all_mined { + panic!( + "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}" + ); + } } #[test] #[ignore] -fn use_latest_tip_integration_test() { - // The purpose of this test is to check if setting the query parameter `tip` to `latest` is working - // as expected. Multiple endpoints accept this parameter, and in this test, we are using the - // GetContractSrc method to test it. - // - // The following scenarios are tested here: - // - The caller does not specify the tip paramater, and the canonical chain tip is used regardless of the - // state of the unconfirmed microblock stream. - // - The caller passes tip=latest with an existing unconfirmed microblock stream, and - // Clarity state from the unconfirmed microblock stream is successfully loaded. - // - The caller passes tip=latest with an empty unconfirmed microblock stream, and - // Clarity state from the canonical chain tip is successfully loaded (i.e. you don't - // get a 404 even though the unconfirmed chain tip points to a nonexistent MARF trie). - // - // Note: In this test, we are manually creating a microblock as well as reloading the unconfirmed - // state of the chainstate, instead of relying on `next_block_and_wait` to generate - // microblocks. We do this because the unconfirmed state is not automatically being initialized - // on the node, so attempting to validate any transactions against the expected unconfirmed - // state fails. 
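`wait_for_mined` above, like the other loops in these tests, follows one retry shape: advance the burnchain, sleep, re-read observable state (here, account nonces), and give up after a fixed number of rounds. A generic sketch of that shape (the closures stand in for `next_block_and_wait` and the nonce checks; nothing here is from the test file itself):

use std::{thread, time::Duration};

fn mine_until(mut advance: impl FnMut(), mut done: impl FnMut() -> bool, rounds: usize) -> bool {
    for _ in 0..rounds {
        advance();
        // Mirrors the 10s settle time the tests allow after each block.
        thread::sleep(Duration::from_secs(10));
        if done() {
            return true;
        }
    }
    false
}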
+fn atlas_stress_integration_test() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_stacks_addr = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stacks_addr.into(); + let mut initial_balances = vec![]; + let mut users = vec![]; + + let batches = 5; + let batch_size = 20; + + for _i in 0..(2 * batches * batch_size + 1) { + let user = StacksPrivateKey::random(); + let initial_balance_user = InitialBalance { + address: to_addr(&user).into(), + amount: 1_000_000_000 * u64::from(core::MICROSTACKS_PER_STACKS), + }; + users.push(user); + initial_balances.push(initial_balance_user); + } + + // Prepare the config of the bootstrap node + let (mut conf_bootstrap_node, _) = neon_integration_test_conf(); + conf_bootstrap_node + .initial_balances + .append(&mut initial_balances.clone()); - let (mut conf, _) = neon_integration_test_conf(); + conf_bootstrap_node.miner.first_attempt_time_ms = u64::MAX; + conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::MAX; - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 100300, - }); + conf_bootstrap_node.node.mine_microblocks = true; + conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; + conf_bootstrap_node.node.wait_time_for_microblocks = 0; + conf_bootstrap_node.node.microblock_frequency = 0; + conf_bootstrap_node.miner.first_attempt_time_ms = 1_000_000; + conf_bootstrap_node.miner.subsequent_attempt_time_ms = 2_000_000; + conf_bootstrap_node.burnchain.max_rbf = 1000000; + conf_bootstrap_node.node.wait_time_for_blocks = 1_000; - conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 10_000; - conf.node.microblock_frequency = 1_000; + conf_bootstrap_node.node.always_use_affirmation_maps = false; + + let user_1 = users.pop().unwrap(); + let initial_balance_user_1 = initial_balances.pop().unwrap(); + // Start the attached observer test_observer::spawn(); - test_observer::register_any(&mut conf); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + let mut btcd_controller = BitcoinCoreController::new(conf_bootstrap_node.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + let burnchain_config = Burnchain::regtest(&conf_bootstrap_node.get_burn_db_path()); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf_bootstrap_node.clone(), + None, + Some(burnchain_config.clone()), + None, + ); + let http_origin = format!("http://{}", &conf_bootstrap_node.node.rpc_bind); btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); - let mut run_loop = neon::RunLoop::new(conf.clone()); + let mut run_loop = neon::RunLoop::new(conf_bootstrap_node.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); - thread::spawn(move || run_loop.start(None, 0)); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); - // Give the run loop some time to start up! + // give the run loop some time to start up! wait_for_runloop(&blocks_processed); - // First block wakes up the run loop. + // first block wakes up the run loop next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // Second block will hold our VRF registration. 
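The `namespace-preorder` and `name-preorder` calls that follow never reveal their target on-chain: the BNS contract only sees `Hash160(target || salt)`, and the later reveal/register step must supply a pair that hashes back to the same commitment. A standalone sketch of that computation, using the same `Hash160` helper these tests already call (assuming the `stacks_common` path for it):

use stacks_common::util::hash::Hash160;

// Hash160 is RIPEMD-160 over SHA-256; the commitment is taken over the
// target (namespace or fully-qualified name) concatenated with the salt.
fn preorder_commitment(target: &str, salt: &str) -> Hash160 {
    Hash160::from_data(format!("{target}{salt}").as_bytes())
}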
+ // first block will hold our VRF registration next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // Third block will be the first mined Stacks block. + // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // Let's query our first spender. - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.balance, 100300); - assert_eq!(account.nonce, 0); + let mut index_block_hashes = vec![]; - // this call wakes up our node - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Let's publish a (1) namespace-preorder, (2) namespace-reveal and (3) name-import in this mempool + + // (define-public (namespace-preorder (hashed-salted-namespace (buff 20)) + // (stx-to-burn uint)) + let namespace = "passport"; + let salt = "some-salt"; + let salted_namespace = format!("{namespace}{salt}"); + let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); + let tx_1 = make_contract_call( + &user_1, + 0, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-preorder", + &[ + Value::buff_from(hashed_namespace.to_bytes().to_vec()).unwrap(), + Value::UInt(1000000000), + ], + ); + + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_1.clone()) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx_1[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + + // (define-public (namespace-reveal (namespace (buff 20)) + // (namespace-salt (buff 20)) + // (p-func-base uint) + // (p-func-coeff uint) + // (p-func-b1 uint) + // (p-func-b2 uint) + // (p-func-b3 uint) + // (p-func-b4 uint) + // (p-func-b5 uint) + // (p-func-b6 uint) + // (p-func-b7 uint) + // (p-func-b8 uint) + // (p-func-b9 uint) + // (p-func-b10 uint) + // (p-func-b11 uint) + // (p-func-b12 uint) + // (p-func-b13 uint) + // (p-func-b14 uint) + // (p-func-b15 uint) + // (p-func-b16 uint) + // (p-func-non-alpha-discount uint) + // (p-func-no-vowel-discount uint) + // (lifetime uint) + // (namespace-import principal)) + let tx_2 = make_contract_call( + &user_1, + 1, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "namespace-reveal", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1), + Value::UInt(1000), + Value::Principal(initial_balance_user_1.address), + ], + ); + + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_2.clone()) + .send() + .unwrap(); + eprintln!("{:#?}", res); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut 
&tx_2[..])
+ .unwrap()
+ .txid()
+ .to_string()
+ );
+ } else {
+ eprintln!("{}", res.text().unwrap());
+ panic!("");
+ }
+
+ let mut mined_namespace_reveal = false;
+ for _j in 0..10 {
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ sleep_ms(10_000);
+
+ let account_after = get_account(&http_origin, &to_addr(&user_1));
+ if account_after.nonce == 2 {
+ mined_namespace_reveal = true;
+ break;
+ }
+ }
+ assert!(
+ mined_namespace_reveal,
+ "Did not mine namespace preorder or reveal"
+ );
+
+ // make a _ton_ of name-imports
+ for i in 0..batches {
+ let account_before = get_account(&http_origin, &to_addr(&user_1));
+
+ for j in 0..batch_size {
+ // (define-public (name-import (namespace (buff 20))
+ // (name (buff 48))
+ // (zonefile-hash (buff 20)))
+ let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j);
+ let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
+
+ let tx_3 = make_contract_call(
+ &user_1,
+ 2 + (batch_size * i + j) as u64,
+ 1000,
+ conf_bootstrap_node.burnchain.chain_id,
+ &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+ "bns",
+ "name-import",
+ &[
+ Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
+ Value::buff_from(format!("johndoe{}", i * batch_size + j).as_bytes().to_vec())
+ .unwrap(),
+ Value::Principal(to_addr(&users[i * batch_size + j]).into()),
+ Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(),
+ ],
+ );
+
+ let body = {
+ let content = PostTransactionRequestBody {
+ tx: bytes_to_hex(&tx_3),
+ attachment: Some(zonefile_hex.to_string()),
+ };
+ serde_json::to_vec(&json!(content)).unwrap()
+ };
+
+ let path = format!("{http_origin}/v2/transactions");
+ let res = client
+ .post(&path)
+ .header("Content-Type", "application/json")
+ .body(body)
+ .send()
+ .unwrap();
+ eprintln!("{res:#?}");
+ if !res.status().is_success() {
+ eprintln!("{}", res.text().unwrap());
+ panic!("");
+ }
+ }
- // Open chainstate.
- // TODO (hack) instantiate the sortdb in the burnchain
- let _ = btc_regtest_controller.sortdb_mut();
- let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf);
- let tip_hash =
- StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash());
- let (mut chainstate, _) = StacksChainState::open(
- false,
- CHAIN_ID_TESTNET,
- &conf.get_chainstate_path_str(),
- None,
- )
- .unwrap();
+ // wait for them all to be mined
+ let mut all_mined = false;
+ let mut account_after_nonce = 0;
+ for _j in 0..10 {
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ sleep_ms(10_000);
- // Initialize the unconfirmed state.
- chainstate
- .reload_unconfirmed_state(
- &btc_regtest_controller
- .sortdb_ref()
- .index_handle_at_block(&chainstate, &tip_hash)
- .unwrap(),
- tip_hash,
- )
- .unwrap();
+ let (ch, bhh) = get_chain_tip(&http_origin);
+ let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh);
+ index_block_hashes.push(ibh);
- // Make microblock with two transactions.
- let recipient = StacksAddress::from_string(ADDR_4).unwrap();
- let transfer_tx = make_stacks_transfer_mblock_only(
- &spender_sk,
- 0,
- 1000,
- conf.burnchain.chain_id,
- &recipient.into(),
- 1000,
- );
+ let account_after = get_account(&http_origin, &to_addr(&user_1));
+ account_after_nonce = account_after.nonce;
+ if account_before.nonce + (batch_size as u64) <= account_after_nonce {
+ all_mined = true;
+ break;
+ }
+ }
+ assert!(
+ all_mined,
+ "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}",
+ account_before.nonce + (batch_size as u64)
+ );
+ }
- let caller_src = "
- (define-public (execute)
- (ok stx-liquid-supply))
- ";
- let publish_tx = make_contract_publish_microblock_only(
- &spender_sk,
- 1,
+ // launch namespace
+ // (define-public (namespace-ready (namespace (buff 20)))
+ let namespace = "passport";
+ let tx_4 = make_contract_call(
+ &user_1,
+ 2 + (batch_size as u64) * (batches as u64),
 1000,
- conf.burnchain.chain_id,
- "caller",
- caller_src,
- );
-
- let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
- let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap();
- let vec_tx = vec![tx_1, tx_2];
- let privk =
- find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap();
- let iconn = btc_regtest_controller
- .sortdb_ref()
- .index_handle_at_block(&chainstate, &tip_hash)
- .unwrap();
- let mblock = make_microblock(
- &privk,
- &mut chainstate,
- &iconn,
- consensus_hash,
- stacks_block,
- vec_tx,
+ conf_bootstrap_node.burnchain.chain_id,
+ &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+ "bns",
+ "namespace-ready",
+ &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()],
 );
- let mut mblock_bytes = vec![];
- mblock.consensus_serialize(&mut mblock_bytes).unwrap();
-
- let client = reqwest::blocking::Client::new();
- // Post the microblock
- let path = format!("{http_origin}/v2/microblocks");
- let res: String = client
+ let path = format!("{http_origin}/v2/transactions");
+ let res = client
 .post(&path)
 .header("Content-Type", "application/octet-stream")
- .body(mblock_bytes.clone())
+ .body(tx_4)
 .send()
- .unwrap()
- .json()
 .unwrap();
+ eprintln!("{res:#?}");
+ if !res.status().is_success() {
+ eprintln!("{}", res.text().unwrap());
+ panic!("");
+ }
- assert_eq!(res, format!("{}", &mblock.block_hash()));
+ let mut mined_namespace_ready = false;
+ for _j in 0..10 {
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ sleep_ms(10_000);
- // Wait for the microblock to be accepted
- sleep_ms(5_000);
- let path = format!("{http_origin}/v2/info");
- let mut iter_count = 0;
- loop {
- let tip_info = client
- .get(&path)
- .send()
- .unwrap()
- .json::<RPCPeerInfoData>()
- .unwrap();
- eprintln!("{:#?}", tip_info);
- if tip_info.unanchored_tip == Some(StacksBlockId([0; 32])) {
- iter_count += 1;
- assert!(
- iter_count < 10,
- "Hit retry count while waiting for net module to process pushed microblock"
- );
- sleep_ms(5_000);
- continue;
- } else {
+ let (ch, bhh) = get_chain_tip(&http_origin);
+ let ibh = StacksBlockHeader::make_index_block_hash(&ch, &bhh);
+ index_block_hashes.push(ibh);
+
+ let account_after = get_account(&http_origin, &to_addr(&user_1));
+ if account_after.nonce == 2 + (batch_size as u64) * (batches as u64) {
+ mined_namespace_ready = true;
 break;
 }
 }
+ assert!(mined_namespace_ready, "Did not mine namespace ready");
- // Wait at least two p2p refreshes so it can produce the microblock.
- for i in 0..30 { - info!( - "wait {} more seconds for microblock miner to find our transaction...", - 30 - i - ); - sleep_ms(1000); - } + // make a _ton_ of preorders + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - // Check event observer for new microblock event (expect 1). - let microblock_events = test_observer::get_microblocks(); - assert_eq!(microblock_events.len(), 1); + let fqn = format!("janedoe{j}.passport"); + let fqn_bytes = fqn.as_bytes().to_vec(); + let salt = format!("{:04x}", j); + let salt_bytes = salt.as_bytes().to_vec(); + let mut hash_data = fqn_bytes.clone(); + hash_data.append(&mut salt_bytes.clone()); - // Don't set the tip parameter, and ask for the source of the contract we just defined in a microblock. - // This should fail because the anchored tip would be unaware of this contract. - let err_opt = get_contract_src( - &http_origin, - spender_stacks_addr, - "caller".to_string(), - false, - ); - match err_opt { - Ok(_) => { - panic!( - "Asking for the contract source off the anchored tip for a contract published \ - only in unconfirmed state should error." - ); - } - // Expect to get "NoSuchContract" because the function we are attempting to call is in a - // contract that only exists on unconfirmed state (and we did not set tip). - Err(err_str) => { - assert!(err_str.contains("No contract source data found")); - } - } + let salted_hash = Hash160::from_data(&hash_data); - // Set tip=latest, and ask for the source of the contract defined in the microblock. - // This should succeeed. - assert!(get_contract_src( - &http_origin, - spender_stacks_addr, - "caller".to_string(), - true, - ) - .is_ok()); + let tx_5 = make_contract_call( + &users[batches * batch_size + j], + 0, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-preorder", + &[ + Value::buff_from(salted_hash.0.to_vec()).unwrap(), + Value::UInt(500), + ], + ); - // Mine an anchored block because now we want to have no unconfirmed state. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx_5.clone()) + .send() + .unwrap(); - // Check that the underlying trie for the unconfirmed state does not exist. - assert!(chainstate.unconfirmed_state.is_some()); - let unconfirmed_state = chainstate.unconfirmed_state.as_mut().unwrap(); - let trie_exists = match unconfirmed_state - .clarity_inst - .trie_exists_for_block(&unconfirmed_state.unconfirmed_chain_tip) - { - Ok(res) => res, - Err(e) => { - panic!("error when determining whether or not trie exists: {:?}", e); + eprintln!( + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) + ); + if !res.status().is_success() { + panic!(""); + } } - }; - assert!(!trie_exists); - - // Set tip=latest, and ask for the source of the contract defined in the previous epoch. - // The underlying MARF trie for the unconfirmed tip does not exist, so the transaction will be - // validated against the confirmed chain tip instead of the unconfirmed tip. This should be valid. 
- assert!(get_contract_src( - &http_origin, - spender_stacks_addr, - "caller".to_string(), - true, - ) - .is_ok()); -} -#[test] -#[ignore] -fn test_flash_block_skip_tenure() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); } - let (mut conf, miner_account) = neon_integration_test_conf(); - conf.miner.microblock_attempt_time_ms = 5_000; - conf.node.wait_time_for_microblocks = 0; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + // make a _ton_ of registers + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - btc_regtest_controller.bootstrap_chain(201); + let zonefile_hex = format!("facade01{j:04x}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - eprintln!("Chain bootstrapped..."); + let tx_6 = make_contract_call( + &users[batches * batch_size + j], + 1, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-register", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::buff_from(salt.as_bytes().to_vec()).unwrap(), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - let missed_tenures = run_loop.get_missed_tenures_arc(); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_6), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - let channel = run_loop.get_coordinator_channel().unwrap(); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - thread::spawn(move || run_loop.start(None, 0)); + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); + } - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); + // make a _ton_ of updates + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade02{j:04x}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let tx_7 = make_contract_call( + &users[batches * batch_size + j], + 2, + 1000, + conf_bootstrap_node.burnchain.chain_id, + &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), + "bns", + "name-update", + &[ + Value::buff_from(namespace.as_bytes().to_vec()).unwrap(), + Value::buff_from(name.as_bytes().to_vec()).unwrap(), + Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap(), + ], + ); - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let body = { + let content = PostTransactionRequestBody { + tx: bytes_to_hex(&tx_7), + attachment: Some(zonefile_hex.to_string()), + }; + serde_json::to_vec(&json!(content)).unwrap() + }; - // fault injection: force tenures to take too long - std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); + let path = format!("{http_origin}/v2/transactions"); + let res = client + .post(&path) + .header("Content-Type", "application/json") + .body(body) + .send() + .unwrap(); + eprintln!("{res:#?}"); + if !res.status().is_success() { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } + } - for i in 0..10 { - // build one bitcoin block every 10 seconds - eprintln!("Build bitcoin block +{i}"); - btc_regtest_controller.build_next_block(1); - sleep_ms(10000); + wait_for_mined( + &mut btc_regtest_controller, + &blocks_processed, + &http_origin, + &users[batches * batch_size..], + &account_before_nonces, + batch_size, + batches, + &mut index_block_hashes, + ); } - // at least one tenure was skipped - let num_skipped = missed_tenures.load(Ordering::SeqCst); - eprintln!("Skipped {num_skipped} tenures"); - assert!(num_skipped > 1); - - // let's query the miner's account nonce: - - eprintln!("Miner account: {miner_account}"); - - let account = get_account(&http_origin, &miner_account); - eprintln!("account = {account:?}"); - assert_eq!(account.balance, 0); - assert_eq!(account.nonce, 2); + // make a _ton_ of renewals + { + let mut account_before_nonces = vec![0; batches * batch_size]; + for j in 0..batches * batch_size { + let account_before = + get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); + account_before_nonces[j] = account_before.nonce; - channel.stop_chains_coordinator(); -} + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade03{j:04x}"); + let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); -#[test] -#[ignore] -fn test_chainwork_first_intervals() { - let (conf, _) = neon_integration_test_conf(); - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); + let tx_8 = make_contract_call( + &users[batches * batch_size + j], + 3, + 1000, + conf_bootstrap_node.burnchain.chain_id, + 
&StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+ "bns",
+ "name-renewal",
+ &[
+ Value::buff_from(namespace.as_bytes().to_vec()).unwrap(),
+ Value::buff_from(name.as_bytes().to_vec()).unwrap(),
+ Value::UInt(500),
+ Value::none(),
+ Value::some(Value::buff_from(hashed_zonefile.as_bytes().to_vec()).unwrap())
+ .unwrap(),
+ ],
+ );
- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+ let body = {
+ let content = PostTransactionRequestBody {
+ tx: bytes_to_hex(&tx_8),
+ attachment: Some(zonefile_hex.to_string()),
+ };
+ serde_json::to_vec(&json!(content)).unwrap()
+ };
- btc_regtest_controller.bootstrap_chain(2016 * 2 - 1);
+ let path = format!("{http_origin}/v2/transactions");
+ let res = client
+ .post(&path)
+ .header("Content-Type", "application/json")
+ .body(body)
+ .send()
+ .unwrap();
+ eprintln!("{res:#?}");
+ if !res.status().is_success() {
+ eprintln!("{}", res.text().unwrap());
+ panic!("");
+ }
+ }
- eprintln!("Chain bootstrapped...");
+ wait_for_mined(
+ &mut btc_regtest_controller,
+ &blocks_processed,
+ &http_origin,
+ &users[batches * batch_size..],
+ &account_before_nonces,
+ batch_size,
+ batches,
+ &mut index_block_hashes,
+ );
+ }
- let mut run_loop = neon::RunLoop::new(conf);
- let blocks_processed = run_loop.get_blocks_processed_arc();
+ // find all attachment indexes and make sure we can get them
+ let mut attachment_indexes = HashMap::new();
+ let mut attachment_hashes = HashMap::new();
+ {
+ let atlasdb_path = conf_bootstrap_node.get_atlas_db_file_path();
+ let atlasdb = AtlasDB::connect(AtlasConfig::new(false), &atlasdb_path, false).unwrap();
+ for ibh in index_block_hashes.iter() {
+ let indexes = query_rows::<u64, _>(
+ &atlasdb.conn,
+ "SELECT attachment_index FROM attachment_instances WHERE index_block_hash = ?1",
+ &[ibh],
+ )
+ .unwrap();
+ if !indexes.is_empty() {
+ attachment_indexes.insert(*ibh, indexes.clone());
+ }
- let channel = run_loop.get_coordinator_channel().unwrap();
+ for index in indexes.iter() {
+ let mut hashes = query_row_columns::<Hash160, _>(
+ &atlasdb.conn,
+ "SELECT content_hash FROM attachment_instances WHERE index_block_hash = ?1 AND attachment_index = ?2",
+ params![ibh, u64_to_sql(*index).unwrap()],
+ "content_hash")
+ .unwrap();
+ if !hashes.is_empty() {
+ assert_eq!(hashes.len(), 1);
+ attachment_hashes.insert((*ibh, *index), hashes.pop());
+ }
+ }
+ }
+ }
+ eprintln!("attachment_indexes = {attachment_indexes:?}");
- thread::spawn(move || run_loop.start(None, 0));
+ let max_request_time_ms = 100;
- // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
- channel.stop_chains_coordinator();
-}
+ for (ibh, attachments) in attachment_indexes.iter() {
+ let l = attachments.len();
+ for i in 0..(l / MAX_ATTACHMENT_INV_PAGES_PER_REQUEST + 1) {
+ if i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST >= l {
+ break;
+ }
-#[test]
-#[ignore]
-fn test_chainwork_partial_interval() {
- let (conf, _) = neon_integration_test_conf();
- let mut btcd_controller = BitcoinCoreController::new(conf.clone());
- btcd_controller
- .start_bitcoind()
- .expect("Failed starting bitcoind");
+ let attachments_batch = attachments[i * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST
+ ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)]
+ .to_vec();
+ let path = format!(
+ "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}",
+ attachments_batch
+ .iter()
+ .map(|a| format!("{a}"))
+ .collect::<Vec<String>>()
+ .join(",")
+ );
- let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
+ let attempts = 10;
+ let ts_begin = get_epoch_time_ms();
+ for _ in 0..attempts {
+ let res = client.get(&path).send().unwrap();
+ assert!(
+ res.status().is_success(),
+ "Bad response for `{path}`: `{:?}`",
+ res.text().unwrap()
+ );
+ let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap();
+ eprintln!("attachment inv response for {path}: {attachment_inv_response:?}");
+ }
+ let ts_end = get_epoch_time_ms();
+ let total_time = ts_end.saturating_sub(ts_begin);
+ eprintln!("Requested {path} {attempts} times in {total_time}ms");
- btc_regtest_controller.bootstrap_chain(2016 - 1);
+ // requests should take no more than max_request_time_ms
+ assert!(
+ total_time < attempts * max_request_time_ms,
+ "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}"
+ );
+ }
- eprintln!("Chain bootstrapped...");
+ for attachment in attachments.iter().take(l) {
+ if *attachment == 0 {
+ continue;
+ }
+ let content_hash = attachment_hashes
+ .get(&(*ibh, *attachment))
+ .cloned()
+ .unwrap()
+ .unwrap();
- let mut run_loop = neon::RunLoop::new(conf);
- let blocks_processed = run_loop.get_blocks_processed_arc();
+ let path = format!("{http_origin}/v2/attachments/{content_hash}");
- let channel = run_loop.get_coordinator_channel().unwrap();
+ let attempts = 10;
+ let ts_begin = get_epoch_time_ms();
+ for _ in 0..attempts {
+ let res = client.get(&path).send().unwrap();
+ assert!(
+ res.status().is_success(),
+ "Bad response for `{path}`: `{:?}`",
+ res.text().unwrap()
+ );
+ let attachment_response: GetAttachmentResponse = res.json().unwrap();
+ eprintln!("attachment response for {path}: {attachment_response:?}");
+ }
+ let ts_end = get_epoch_time_ms();
+ let total_time = ts_end.saturating_sub(ts_begin);
+ eprintln!("Requested {path} {attempts} times in {total_time}ms");
- thread::spawn(move || run_loop.start(None, 0));
+ // requests should take no more than max_request_time_ms
+ assert!(
+ total_time < attempts * max_request_time_ms,
+ "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}"
+ );
+ }
+ }
- // give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
- channel.stop_chains_coordinator();
+ test_observer::clear();
 }
-#[test]
-#[ignore]
-fn test_problematic_txs_are_not_stored() {
+/// Run a fixed contract 20 times. Linearly increase the amount paid each time. The cost of the
+/// contract should stay the same, and the fee rate paid should monotonically grow.
The value +/// should grow faster for lower values of `window_size`, because a bigger window slows down the +/// growth. +fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value: f64) { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); - let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); - let spender_stacks_addr_1 = to_addr(&spender_sk_1); - let spender_stacks_addr_2 = to_addr(&spender_sk_2); - let spender_stacks_addr_3 = to_addr(&spender_sk_3); - let spender_addr_1: PrincipalData = spender_stacks_addr_1.into(); - let spender_addr_2: PrincipalData = spender_stacks_addr_2.into(); - let spender_addr_3: PrincipalData = spender_stacks_addr_3.into(); + let max_contract_src = r#" +;; define counter variable +(define-data-var counter int 0) - let (mut conf, _) = neon_integration_test_conf(); +;; increment method +(define-public (increment) + (begin + (var-set counter (+ (var-get counter) 1)) + (ok (var-get counter)))) - conf.initial_balances.push(InitialBalance { - address: spender_addr_1, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_2, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_3, - amount: 1_000_000_000_000, - }); + (define-public (increment-many) + (begin + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (unwrap! (increment) (err u1)) + (ok (var-get counter)))) + "#; - // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); + let spender_sk = StacksPrivateKey::random(); + let spender_addr = to_addr(&spender_sk); - // take effect immediately - conf.burnchain.ast_precheck_size_height = Some(0); + let (mut conf, _) = neon_integration_test_conf(); + + // Set this estimator as special. + conf.estimation.fee_estimator = Some(FeeEstimatorName::FuzzedWeightedMedianFeeRate); + // Use randomness of 0 to keep test constant. Randomness is tested in unit tests. 
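+ // With the fuzzer fraction at 0, the estimator reduces to a plain weighted median over
+ // the last `window_size` fee-rate samples. Roughly (a sketch of the idea only, not the
+ // estimator's actual code):
+ //
+ //     let mut w: Vec<f64> = window.to_vec();
+ //     w.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ //     let median = w[w.len() / 2];
+ //
+ // A smaller window therefore tracks the linearly growing payments below faster, which
+ // is why the window-5 test expects a larger final estimate than the window-10 one.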
+ conf.estimation.fee_rate_fuzzer_fraction = 0f64; + conf.estimation.fee_rate_window_size = window_size; + conf.initial_balances.push(InitialBalance { + address: spender_addr.into(), + amount: 10000000000, + }); test_observer::spawn(); test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); @@ -9588,237 +6901,171 @@ fn test_problematic_txs_are_not_stored() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); - // something at the limit of the expression depth (will get mined and processed) - let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; - let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); - let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); - - let tx_edge = make_contract_publish( - &spender_sk_1, - 0, - (tx_edge_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-edge", - &tx_edge_body, - ); - let tx_edge_txid = StacksTransaction::consensus_deserialize(&mut &tx_edge[..]) - .unwrap() - .txid(); - - // something just over the limit of the expression depth - let exceeds_repeat_factor = edge_repeat_factor + 1; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - .txid(); - - // something stupidly high over the expression depth - let high_repeat_factor = 128 * 1024; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - - let tx_high = make_contract_publish( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); - - btc_regtest_controller.bootstrap_chain(201); + btc_regtest_controller.bootstrap_chain(200); eprintln!("Chain bootstrapped..."); let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); thread::spawn(move || run_loop.start(None, 0)); - // Give the run loop some time to start up! wait_for_runloop(&blocks_processed); + run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 210, &conf); - // First block wakes up the run loop. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + submit_tx( + &http_origin, + &make_contract_publish( + &spender_sk, + 0, + 110000, + conf.burnchain.chain_id, + "increment-contract", + max_contract_src, + ), + ); + run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); - // Second block will hold our VRF registration. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Loop 20 times. 
Each time, execute the same transaction, but increase the amount *paid*.
+ // This will exercise the window size.
+ let mut response_estimated_costs = vec![];
+ let mut response_top_fee_rates = vec![];
+ for i in 1..21 {
+ submit_tx(
+ &http_origin,
+ &make_contract_call(
+ &spender_sk,
+ i, // nonce
+ i * 100000, // payment
+ conf.burnchain.chain_id,
+ &spender_addr,
+ "increment-contract",
+ "increment-many",
+ &[],
+ ),
+ );
+ run_until_burnchain_height(
+ &mut btc_regtest_controller,
+ &blocks_processed,
+ 212 + 2 * i,
+ &conf,
+ );
- // Third block will be the first mined Stacks block.
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ {
+ // Read from the fee estimation endpoint.
+ let path = format!("{http_origin}/v2/fees/transaction");
- submit_tx(&http_origin, &tx_edge);
- submit_tx(&http_origin, &tx_exceeds);
- submit_tx(&http_origin, &tx_high);
+ let tx_payload = TransactionPayload::ContractCall(TransactionContractCall {
+ address: spender_addr,
+ contract_name: ContractName::from("increment-contract"),
+ function_name: ClarityName::from("increment-many"),
+ function_args: vec![],
+ });
- // only tx_edge should be in the mempool
- assert!(get_unconfirmed_tx(&http_origin, &tx_edge_txid).is_some());
- assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_none());
- assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none());
+ let payload_data = tx_payload.serialize_to_vec();
+ let payload_hex = format!("0x{}", to_hex(&payload_data));
- channel.stop_chains_coordinator();
-}
+ let body = json!({ "transaction_payload": payload_hex.clone() });
-fn find_new_files(dirp: &str, prev_files: &HashSet<String>) -> (Vec<String>, HashSet<String>) {
- let dirpp = Path::new(dirp);
- debug!("readdir {dirp}");
- let cur_files = fs::read_dir(dirp).unwrap();
- let mut new_files = vec![];
- let mut cur_files_set = HashSet::new();
- for cur_file in cur_files.into_iter() {
- let cur_file = cur_file.unwrap();
- let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string();
- test_debug!("file in {dirp}: {cur_file_fullpath}");
- cur_files_set.insert(cur_file_fullpath.clone());
- if prev_files.contains(&cur_file_fullpath) {
- test_debug!("already contains {cur_file_fullpath}");
- continue;
+ let client = reqwest::blocking::Client::new();
+ let fee_rate_result = client
+ .post(&path)
+ .json(&body)
+ .send()
+ .expect("Should be able to post")
+ .json::<RPCFeeEstimateResponse>()
+ .expect("Failed to parse result into JSON");
+
+ response_estimated_costs.push(fee_rate_result.estimated_cost_scalar);
+ response_top_fee_rates.push(fee_rate_result.estimations.last().unwrap().fee_rate);
 }
 }
- test_debug!("new file {cur_file_fullpath}");
- new_files.push(cur_file_fullpath);
 }
- debug!(
- "Checked {dirp} for new files; found {} (all: {})",
- new_files.len(),
- cur_files_set.len()
- );
- (new_files, cur_files_set)
-}
-
-fn spawn_follower_node(
- initial_conf: &Config,
-) -> (
- Config,
- neon::RunLoopCounter,
- PoxSyncWatchdogComms,
- CoordinatorChannels,
-) {
- let bootstrap_node_public_key = {
- let keychain = Keychain::default(initial_conf.node.seed.clone());
- let mut pk = keychain.generate_op_signer().get_public_key();
- pk.set_compressed(true);
- pk.to_hex()
- };
-
- let (mut conf, _) = neon_integration_test_conf();
- conf.node.set_bootstrap_nodes(
- format!(
- "{}@{}",
- &bootstrap_node_public_key, initial_conf.node.p2p_bind
- ),
- conf.burnchain.chain_id,
- conf.burnchain.peer_version,
- );
- test_observer::register_any(&mut conf);
+ // Wait two extra blocks to be sure.
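+ // (this gives the node time to process the receipts of the final call, so the last
+ // fee-rate samples are reflected in the estimator before the checks below)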
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
- conf.initial_balances = initial_conf.initial_balances.clone();
- conf.burnchain.epochs = initial_conf.burnchain.epochs.clone();
- conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height;
+ assert_eq!(response_estimated_costs.len(), response_top_fee_rates.len());
- conf.connection_options.inv_sync_interval = 3;
+ // Check that:
+ // 1) The cost is always the same.
+ // 2) Fee rate grows monotonically.
+ for i in 1..response_estimated_costs.len() {
+ let curr_cost = response_estimated_costs[i];
+ let last_cost = response_estimated_costs[i - 1];
+ assert_eq!(curr_cost, last_cost);
- conf.node.always_use_affirmation_maps = false;
+ let curr_rate = response_top_fee_rates[i];
+ let last_rate = response_top_fee_rates[i - 1];
+ assert!(curr_rate >= last_rate);
+ }
- let mut run_loop = neon::RunLoop::new(conf.clone());
- let blocks_processed = run_loop.get_blocks_processed_arc();
- let channel = run_loop.get_coordinator_channel().unwrap();
- let pox_sync = run_loop.get_pox_sync_comms();
+ // Check the final value is near input parameter.
+ assert!(is_close_f64(
+ *response_top_fee_rates.last().unwrap(),
+ expected_final_value
+ ));
- thread::spawn(move || run_loop.start(None, 0));
+ channel.stop_chains_coordinator();
+}
- // Give the run loop some time to start up!
- wait_for_runloop(&blocks_processed);
+/// Test the FuzzedWeightedMedianFeeRate with window size 5 and randomness 0. We increase the
+/// amount paid linearly each time. This estimate should grow *faster* than with window size 10.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window5() {
+ fuzzed_median_fee_rate_estimation_test(5, 202680.0992)
+}
- (conf, blocks_processed, pox_sync, channel)
+/// Test the FuzzedWeightedMedianFeeRate with window size 10 and randomness 0. We increase the
+/// amount paid linearly each time. This estimate should grow *slower* than with window size 5.
+#[test]
+#[ignore]
+fn fuzzed_median_fee_rate_estimation_test_window10() {
+ fuzzed_median_fee_rate_estimation_test(10, 90080.5496)
}
-// TODO: test in epoch 2.1 with parser_v2
#[test]
#[ignore]
-fn test_problematic_blocks_are_not_mined() {
+fn use_latest_tip_integration_test() {
+ // The purpose of this test is to check if setting the query parameter `tip` to `latest` is working
+ // as expected. Multiple endpoints accept this parameter, and in this test, we are using the
+ // GetContractSrc method to test it.
+ //
+ // The following scenarios are tested here:
+ // - The caller does not specify the tip parameter, and the canonical chain tip is used regardless of the
+ // state of the unconfirmed microblock stream.
+ // - The caller passes tip=latest with an existing unconfirmed microblock stream, and
+ // Clarity state from the unconfirmed microblock stream is successfully loaded.
+ // - The caller passes tip=latest with an empty unconfirmed microblock stream, and
+ // Clarity state from the canonical chain tip is successfully loaded (i.e. you don't
+ // get a 404 even though the unconfirmed chain tip points to a nonexistent MARF trie).
+ //
+ // Note: In this test, we are manually creating a microblock as well as reloading the unconfirmed
+ // state of the chainstate, instead of relying on `next_block_and_wait` to generate
+ // microblocks.
We do this because the unconfirmed state is not automatically being initialized + // on the node, so attempting to validate any transactions against the expected unconfirmed + // state fails. if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(bad_blocks_dir).is_ok() { - fs::remove_dir_all(bad_blocks_dir).unwrap(); - } - fs::create_dir_all(bad_blocks_dir).unwrap(); - - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); - - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); - let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); - let spender_stacks_addr_1 = to_addr(&spender_sk_1); - let spender_stacks_addr_2 = to_addr(&spender_sk_2); - let spender_stacks_addr_3 = to_addr(&spender_sk_3); - let spender_addr_1: PrincipalData = spender_stacks_addr_1.into(); - let spender_addr_2: PrincipalData = spender_stacks_addr_2.into(); - let spender_addr_3: PrincipalData = spender_stacks_addr_3.into(); + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_stacks_addr = to_addr(&spender_sk); + let spender_addr: PrincipalData = spender_stacks_addr.into(); let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: spender_addr_1, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_2, - amount: 1_000_000_000_000, - }); - conf.initial_balances.push(InitialBalance { - address: spender_addr_3, - amount: 1_000_000_000_000, - }); - - // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(EpochList::new(&[ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 10_002, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 10_002, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ])); - conf.burnchain.pox_2_activation = Some(10_003); + address: spender_addr.clone(), + amount: 100300, + }); - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); + conf.node.mine_microblocks = true; + conf.node.wait_time_for_microblocks = 10_000; + conf.node.microblock_frequency = 1_000; test_observer::spawn(); test_observer::register_any(&mut conf); @@ -9831,49 +7078,12 @@ fn test_problematic_blocks_are_not_mined() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); - // something just over the limit of the expression depth - let exceeds_repeat_factor = 32; - let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - - let tx_exceeds = make_contract_publish( - &spender_sk_2, - 0, - (tx_exceeds_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-exceeds", - &tx_exceeds_body, - ); - let tx_exceeds_txid = StacksTransaction::consensus_deserialize(&mut &tx_exceeds[..]) - .unwrap() - 
.txid(); - - // something stupidly high over the expression depth - let high_repeat_factor = 3200; - let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); - let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - - let tx_high = make_contract_publish( - &spender_sk_3, - 0, - (tx_high_body.len() * 100) as u64, - conf.burnchain.chain_id, - "test-high", - &tx_high_body, - ); - let tx_high_txid = StacksTransaction::consensus_deserialize(&mut &tx_high[..]) - .unwrap() - .txid(); - btc_regtest_controller.bootstrap_chain(201); eprintln!("Chain bootstrapped..."); let mut run_loop = neon::RunLoop::new(conf.clone()); let blocks_processed = run_loop.get_blocks_processed_arc(); - let channel = run_loop.get_coordinator_channel().unwrap(); thread::spawn(move || run_loop.start(None, 0)); @@ -9889,219 +7099,328 @@ fn test_problematic_blocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_exceeds); - assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + // Let's query our first spender. + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, 100300); + assert_eq!(account.nonce, 0); + + // this call wakes up our node + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // Open chainstate. + // TODO (hack) instantiate the sortdb in the burnchain + let _ = btc_regtest_controller.sortdb_mut(); + let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); + let tip_hash = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + let (mut chainstate, _) = StacksChainState::open( + false, + CHAIN_ID_TESTNET, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + // Initialize the unconfirmed state. + chainstate + .reload_unconfirmed_state( + &btc_regtest_controller + .sortdb_ref() + .index_handle_at_block(&chainstate, &tip_hash) + .unwrap(), + tip_hash, + ) + .unwrap(); + + // Make microblock with two transactions. 
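+ // (one microblock-only STX transfer and one microblock-only contract publish, so the
+ // anchored-block miner cannot pick either of them up)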
+ let recipient = StacksAddress::from_string(ADDR_4).unwrap();
+ let transfer_tx = make_stacks_transfer_mblock_only(
+ &spender_sk,
+ 0,
+ 1000,
+ conf.burnchain.chain_id,
+ &recipient.into(),
+ 1000,
+ );
+
+ let caller_src = "
+ (define-public (execute)
+ (ok stx-liquid-supply))
+ ";
+ let publish_tx = make_contract_publish_microblock_only(
+ &spender_sk,
+ 1,
+ 1000,
+ conf.burnchain.chain_id,
+ "caller",
+ caller_src,
+ );
+
+ let tx_1 = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
+ let tx_2 = StacksTransaction::consensus_deserialize(&mut &publish_tx[..]).unwrap();
+ let vec_tx = vec![tx_1, tx_2];
+ let privk =
+ find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024).unwrap();
+ let iconn = btc_regtest_controller
+ .sortdb_ref()
+ .index_handle_at_block(&chainstate, &tip_hash)
+ .unwrap();
+ let mblock = make_microblock(
+ &privk,
+ &mut chainstate,
+ &iconn,
+ consensus_hash,
+ stacks_block,
+ vec_tx,
+ );
+ let mut mblock_bytes = vec![];
+ mblock.consensus_serialize(&mut mblock_bytes).unwrap();
+
+ let client = reqwest::blocking::Client::new();
+
+ // Post the microblock
+ let path = format!("{http_origin}/v2/microblocks");
+ let res: String = client
+ .post(&path)
+ .header("Content-Type", "application/octet-stream")
+ .body(mblock_bytes.clone())
+ .send()
+ .unwrap()
+ .json()
+ .unwrap();
+
+ assert_eq!(res, format!("{}", &mblock.block_hash()));
+
+ // Wait for the microblock to be accepted
+ sleep_ms(5_000);
+ let path = format!("{http_origin}/v2/info");
+ let mut iter_count = 0;
+ loop {
+ let tip_info = client
+ .get(&path)
+ .send()
+ .unwrap()
+ .json::<RPCPeerInfoData>()
+ .unwrap();
+ eprintln!("{tip_info:#?}");
+ if tip_info.unanchored_tip == Some(StacksBlockId([0; 32])) {
+ iter_count += 1;
+ assert!(
+ iter_count < 10,
+ "Hit retry count while waiting for net module to process pushed microblock"
+ );
+ sleep_ms(5_000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ // Wait at least two p2p refreshes so it can produce the microblock.
+ for i in 0..30 {
+ info!(
+ "wait {} more seconds for microblock miner to find our transaction...",
+ 30 - i
+ );
+ sleep_ms(1000);
+ }
+
+ // Check event observer for new microblock event (expect 1).
+ let microblock_events = test_observer::get_microblocks();
+ assert_eq!(microblock_events.len(), 1);
+
+ // Don't set the tip parameter, and ask for the source of the contract we just defined in a microblock.
+ // This should fail because the anchored tip would be unaware of this contract.
+ let err_opt = get_contract_src(
+ &http_origin,
+ spender_stacks_addr,
+ "caller".to_string(),
+ false,
+ );
+ match err_opt {
+ Ok(_) => {
+ panic!(
+ "Asking for the contract source off the anchored tip for a contract published \
+ only in unconfirmed state should error."
+ );
+ }
+ // Expect to get "NoSuchContract" because the function we are attempting to call is in a
+ // contract that only exists on unconfirmed state (and we did not set tip).
+ Err(err_str) => {
+ assert!(err_str.contains("No contract source data found"));
+ }
+ }
+
+ // Set tip=latest, and ask for the source of the contract defined in the microblock.
+ // This should succeed.
+ assert!(get_contract_src(
+ &http_origin,
+ spender_stacks_addr,
+ "caller".to_string(),
+ true,
+ )
+ .is_ok());
+
+ // Mine an anchored block because now we want to have no unconfirmed state.
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+ // Check that the underlying trie for the unconfirmed state does not exist.
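+ // (the unconfirmed-state object survives, but its chain tip now names a MARF trie that
+ // was never materialized; this is the edge case that tip=latest must tolerate below)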
+ assert!(chainstate.unconfirmed_state.is_some()); + let unconfirmed_state = chainstate.unconfirmed_state.as_mut().unwrap(); + let trie_exists = match unconfirmed_state + .clarity_inst + .trie_exists_for_block(&unconfirmed_state.unconfirmed_chain_tip) + { + Ok(res) => res, + Err(e) => { + panic!("error when determining whether or not trie exists: {:?}", e); + } + }; + assert!(!trie_exists); + + // Set tip=latest, and ask for the source of the contract defined in the previous epoch. + // The underlying MARF trie for the unconfirmed tip does not exist, so the transaction will be + // validated against the confirmed chain tip instead of the unconfirmed tip. This should be valid. + assert!(get_contract_src( + &http_origin, + spender_stacks_addr, + "caller".to_string(), + true, + ) + .is_ok()); +} + +#[test] +#[ignore] +fn test_flash_block_skip_tenure() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, miner_account) = neon_integration_test_conf(); + conf.miner.microblock_attempt_time_ms = 5_000; + conf.node.wait_time_for_microblocks = 0; + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; + btc_regtest_controller.bootstrap_chain(201); - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } + eprintln!("Chain bootstrapped..."); - // all blocks were processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for blocks to be processed"); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let missed_tenures = run_loop.get_missed_tenures_arc(); - // no blocks considered problematic - assert!(all_new_files.is_empty()); + let channel = run_loop.get_coordinator_channel().unwrap(); - // one block contained tx_exceeds - let blocks = test_observer::get_blocks(); - let mut found = false; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) = &parsed.payload { - if parsed.txid() == tx_exceeds_txid { - found = true; - break; - } - } - } - } + thread::spawn(move || run_loop.start(None, 0)); - assert!(found); + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); - let (tip, cur_ast_rules) = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) - }; + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - assert_eq!(cur_ast_rules, ASTRules::Typical); + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); - submit_tx(&http_origin, &tx_high); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - btc_regtest_controller.build_next_block(1); + // fault injection: force tenures to take too long + std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); - // wait for runloop to advance - wait_for(30, || { - let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(new_tip.block_height > tip.block_height) - }) - .expect("Failed waiting for blocks to be processed"); + for i in 0..10 { + // build one bitcoin block every 10 seconds + eprintln!("Build bitcoin block +{i}"); + btc_regtest_controller.build_next_block(1); + sleep_ms(10000); + } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; + // at least one tenure was skipped + let num_skipped = missed_tenures.load(Ordering::SeqCst); + eprintln!("Skipped {num_skipped} tenures"); + assert!(num_skipped > 1); - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); + // let's query the miner's account nonce: - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; + eprintln!("Miner account: {miner_account}"); - eprintln!("old_tip_info = {old_tip_info:?}"); + let account = get_account(&http_origin, &miner_account); + eprintln!("account = {account:?}"); + assert_eq!(account.balance, 0); + assert_eq!(account.nonce, 2); - // mine some blocks, and log problematic blocks - for _i in 0..6 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } + channel.stop_chains_coordinator(); +} - // all blocks were processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for blocks to be processed"); +#[test] +#[ignore] +fn test_chainwork_first_intervals() { + let (conf, _) = neon_integration_test_conf(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + 
btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // none were problematic - assert!(all_new_files.is_empty()); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - // recently-submitted problematic transactions are not in the mempool - // (but old ones that were already mined, and thus never considered, could still be present) - test_debug!("Problematic tx {tx_high_txid} should be dropped"); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); + btc_regtest_controller.bootstrap_chain(2016 * 2 - 1); - // no block contained the tx_high bad transaction, ever - let blocks = test_observer::get_blocks(); - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) = &parsed.payload { - assert!(parsed.txid() != tx_high_txid); - } - } - } + eprintln!("Chain bootstrapped..."); - let new_tip_info = get_chain_info(&conf); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - eprintln!("\nBooting follower\n"); + let channel = run_loop.get_coordinator_channel().unwrap(); - // verify that a follower node that boots up with this node as a bootstrap peer will process - // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); + thread::spawn(move || run_loop.start(None, 0)); - eprintln!( - "\nFollower booted on port {},{}\n", - follower_conf.node.p2p_bind, follower_conf.node.rpc_bind - ); + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); + channel.stop_chains_coordinator(); +} - // Do not unwrap in case we were just slow - let _ = wait_for(300, || { - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) - }); +#[test] +#[ignore] +fn test_chainwork_partial_interval() { + let (conf, _) = neon_integration_test_conf(); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); - // make sure we aren't just slow -- wait for the follower to do a few download passes - let num_download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {num_download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - wait_for(30, || { - let download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {download_passes} download passes; wait for {}\n", - num_download_passes + 5 - ); - Ok(download_passes >= num_download_passes + 5) - }) - .expect("Failed waiting for follower to perform enough download passes"); + btc_regtest_controller.bootstrap_chain(2016 - 1); - eprintln!( - "\nFollower has performed {} download passes\n", - pox_sync_comms.get_download_passes() - ); + eprintln!("Chain bootstrapped..."); - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); + let mut run_loop = neon::RunLoop::new(conf); + let blocks_processed = run_loop.get_blocks_processed_arc(); - assert_eq!( - follower_tip_info.stacks_tip_height, - new_tip_info.stacks_tip_height - ); + let channel = run_loop.get_coordinator_channel().unwrap(); - test_observer::clear(); + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! 
+ wait_for_runloop(&blocks_processed); channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); } -// TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_blocks_are_not_relayed_or_stored() { +fn test_problematic_txs_are_not_stored() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(bad_blocks_dir).is_ok() { - fs::remove_dir_all(bad_blocks_dir).unwrap(); - } - fs::create_dir_all(bad_blocks_dir).unwrap(); - - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_sk_3 = StacksPrivateKey::from_hex(SK_3).unwrap(); @@ -10153,13 +7472,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { ])); conf.burnchain.pox_2_activation = Some(10_003); - // AST precheck becomes default at burn height - conf.burnchain.ast_precheck_size_height = Some(210); + // take effect immediately + conf.burnchain.ast_precheck_size_height = Some(0); test_observer::spawn(); test_observer::register_any(&mut conf); let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); @@ -10167,8 +7487,26 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); let http_origin = format!("http://{}", &conf.node.rpc_bind); + // something at the limit of the expression depth (will get mined and processed) + let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; + let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); + let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); + + let tx_edge = make_contract_publish( + &spender_sk_1, + 0, + (tx_edge_body.len() * 100) as u64, + conf.burnchain.chain_id, + "test-edge", + &tx_edge_body, + ); + let tx_edge_txid = StacksTransaction::consensus_deserialize(&mut &tx_edge[..]) + .unwrap() + .txid(); + // something just over the limit of the expression depth - let exceeds_repeat_factor = 32; + let exceeds_repeat_factor = edge_repeat_factor + 1; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); @@ -10185,7 +7523,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { .unwrap() .txid(); - let high_repeat_factor = 70; + // something stupidly high over the expression depth + let high_repeat_factor = 128 * 1024; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); @@ -10224,249 +7563,101 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
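+ // Once it is mined, the three contract publishes built above are submitted; with
+ // `ast_precheck_size_height = Some(0)` in force, only `tx_edge`, which sits exactly at
+ // the AST depth limit, should survive the mempool's AST pre-check.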
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); + submit_tx(&http_origin, &tx_edge); submit_tx(&http_origin, &tx_exceeds); - assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - } - - let tip_info = get_chain_info(&conf); - - // blocks were all processed - assert_eq!( - tip_info.stacks_tip_height, - old_tip_info.stacks_tip_height + 5 - ); - // no blocks considered problematic - assert!(all_new_files.is_empty()); - - // one block contained tx_exceeds - let blocks = test_observer::get_blocks(); - let mut found = false; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) = &parsed.payload { - if parsed.txid() == tx_exceeds_txid { - found = true; - break; - } - } - } - } - - assert!(found); - - let (tip, cur_ast_rules) = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) - }; - - assert_eq!(cur_ast_rules, ASTRules::Typical); - - btc_regtest_controller.build_next_block(1); - - // wait for runloop to advance - loop { - sleep_ms(1_000); - let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if new_tip.block_height > tip.block_height { - break; - } - } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // new rules took effect - assert_eq!(cur_ast_rules, ASTRules::PrecheckSize); - - // the follower we will soon boot up will start applying the new AST rules at this height. 
- // Make it so the miner does *not* follow the rules - { - let sortdb = btc_regtest_controller.sortdb_mut(); - let mut tx = sortdb.tx_begin().unwrap(); - SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 10_000).unwrap(); - tx.commit().unwrap(); - } - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - eprintln!("Sort db tip: {}", tip.block_height); - let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); - - // add another bad tx to the mempool. - // because the miner is now non-conformant, it should mine this tx. - debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); - assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - - let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); - let old_tip_info = get_chain_info(&conf); - let mut all_new_files = vec![]; - - eprintln!("old_tip_info = {old_tip_info:?}"); - // mine some blocks, and log problematic blocks - for _i in 0..6 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let cur_files_old = cur_files.clone(); - let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); - all_new_files.append(&mut new_files); - cur_files = cur_files_new; - - let cur_ast_rules = { - let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let cur_ast_rules = - SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - cur_ast_rules - }; - - // we reverted to the old rules (but the follower won't) - assert_eq!(cur_ast_rules, ASTRules::Typical); - } - - let tip_info = get_chain_info(&conf); + // only tx_edge should be in the mempool + assert!(get_unconfirmed_tx(&http_origin, &tx_edge_txid).is_some()); + assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_none()); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); - // at least one block was mined (hard to say how many due to the raciness between the burnchain - // downloader and this thread). - info!( - "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", - tip_info.stacks_tip_height, old_tip_info.stacks_tip_height - ); - assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); - // one was problematic -- i.e. the one that included tx_high - assert_eq!(all_new_files.len(), 1); + channel.stop_chains_coordinator(); +} - // tx_high got mined by the miner - let blocks = test_observer::get_blocks(); - let mut bad_block_height = None; - for block in blocks { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(..) 
= &parsed.payload { - if parsed.txid() == tx_high_txid { - bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap()); - } - } +fn find_new_files(dirp: &str, prev_files: &HashSet) -> (Vec, HashSet) { + let dirpp = Path::new(dirp); + debug!("readdir {dirp}"); + let cur_files = fs::read_dir(dirp).unwrap(); + let mut new_files = vec![]; + let mut cur_files_set = HashSet::new(); + for cur_file in cur_files.into_iter() { + let cur_file = cur_file.unwrap(); + let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string(); + test_debug!("file in {dirp}: {cur_file_fullpath}"); + cur_files_set.insert(cur_file_fullpath.clone()); + if prev_files.contains(&cur_file_fullpath) { + test_debug!("already contains {cur_file_fullpath}"); + continue; } + test_debug!("new file {cur_file_fullpath}"); + new_files.push(cur_file_fullpath); } - assert!(bad_block_height.is_some()); - let bad_block_height = bad_block_height.unwrap(); - - // follower should not process bad_block_height or higher - let new_tip_info = get_chain_info(&conf); - - eprintln!("\nBooting follower\n"); - - // verify that a follower node that boots up with this node as a bootstrap peer will process - // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); - - eprintln!( - "\nFollower booted on port {},{}\n", - follower_conf.node.p2p_bind, follower_conf.node.rpc_bind + debug!( + "Checked {dirp} for new files; found {} (all: {})", + new_files.len(), + cur_files_set.len() ); + (new_files, cur_files_set) +} - let deadline = get_epoch_time_secs() + 300; - while get_epoch_time_secs() < deadline { - let follower_tip_info = get_chain_info(&follower_conf); - if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height - || follower_tip_info.stacks_tip_height + 1 == bad_block_height - { - break; - } - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); - sleep_ms(1000); - } +fn spawn_follower_node( + initial_conf: &Config, +) -> ( + Config, + neon::RunLoopCounter, + PoxSyncWatchdogComms, + CoordinatorChannels, +) { + let bootstrap_node_public_key = { + let keychain = Keychain::default(initial_conf.node.seed.clone()); + let mut pk = keychain.generate_op_signer().get_public_key(); + pk.set_compressed(true); + pk.to_hex() + }; - // make sure we aren't just slow -- wait for the follower to do a few download passes - let num_download_passes = pox_sync_comms.get_download_passes(); - eprintln!( - "\nFollower has performed {num_download_passes} download passes; wait for {}\n", - num_download_passes + 5 + let (mut conf, _) = neon_integration_test_conf(); + conf.node.set_bootstrap_nodes( + format!( + "{}@{}", + &bootstrap_node_public_key, initial_conf.node.p2p_bind + ), + conf.burnchain.chain_id, + conf.burnchain.peer_version, ); - while num_download_passes + 5 > pox_sync_comms.get_download_passes() { - sleep_ms(1000); - eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - pox_sync_comms.get_download_passes(), - num_download_passes + 5 - ); - } + test_observer::register_any(&mut conf); - eprintln!( - "\nFollower has performed {} download passes\n", - pox_sync_comms.get_download_passes() - ); + conf.initial_balances = initial_conf.initial_balances.clone(); + conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); + 
conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; - let follower_tip_info = get_chain_info(&follower_conf); - eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height - ); + conf.connection_options.inv_sync_interval = 3; - // follower rejects the bad block - assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height - 1); + conf.node.always_use_affirmation_maps = false; - test_observer::clear(); - channel.stop_chains_coordinator(); - follower_channel.stop_chains_coordinator(); + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let channel = run_loop.get_coordinator_channel().unwrap(); + let pox_sync = run_loop.get_pox_sync_comms(); + + thread::spawn(move || run_loop.start(None, 0)); + + // Give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + (conf, blocks_processed, pox_sync, channel) } // TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_microblocks_are_not_mined() { +fn test_problematic_blocks_are_not_mined() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; + let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; if fs::metadata(bad_blocks_dir).is_ok() { fs::remove_dir_all(bad_blocks_dir).unwrap(); } @@ -10528,12 +7719,6 @@ fn test_problematic_microblocks_are_not_mined() { // AST precheck becomes default at burn height conf.burnchain.ast_precheck_size_height = Some(210); - // mine microblocks - conf.node.mine_microblocks = true; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -10551,7 +7736,7 @@ fn test_problematic_microblocks_are_not_mined() { let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let tx_exceeds = make_contract_publish_microblock_only( + let tx_exceeds = make_contract_publish( &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, @@ -10564,12 +7749,12 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - let tx_high = make_contract_publish_microblock_only( + let tx_high = make_contract_publish( &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, @@ -10603,12 +7788,11 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10620,26 +7804,23 @@ fn test_problematic_microblocks_are_not_mined() { let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - // give the microblock miner a chance - sleep_ms(5_000); } - // microblocks and blocks were all processed + // all blocks were processed wait_for(30, || { let tip_info = get_chain_info(&conf); Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) }) - .expect("Failed waiting for microblocks to be processed"); + .expect("Failed waiting for blocks to be processed"); - // no microblocks considered problematic + // no blocks considered problematic assert!(all_new_files.is_empty()); - // one microblock contained tx_exceeds - let microblocks = test_observer::get_microblocks(); + // one block contained tx_exceeds + let blocks = test_observer::get_blocks(); let mut found = false; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -10669,15 +7850,13 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {tx_high_txid}"); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); - info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { @@ -10685,7 +7864,7 @@ fn test_problematic_microblocks_are_not_mined() { let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) - .expect("Failed waiting for runloop to advance"); + .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); @@ -10704,24 +7883,21 @@ fn test_problematic_microblocks_are_not_mined() { eprintln!("old_tip_info = {old_tip_info:?}"); - // mine some microblocks, and log problematic microblocks + // mine some blocks, and log problematic blocks for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - 
// give the microblock miner a chance - sleep_ms(5_000); } - // sleep a little longer before checking tip info; this should help with test flakiness + // all blocks were processed wait_for(30, || { let tip_info = get_chain_info(&conf); Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) }) - .expect("Failed waiting for microblocks to be processed"); + .expect("Failed waiting for blocks to be processed"); // none were problematic assert!(all_new_files.is_empty()); @@ -10731,10 +7907,10 @@ fn test_problematic_microblocks_are_not_mined() { test_debug!("Problematic tx {tx_high_txid} should be dropped"); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); - // no microblock contained the tx_high bad transaction, ever - let microblocks = test_observer::get_microblocks(); - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + // no block contained the tx_high bad transaction, ever + let blocks = test_observer::get_blocks(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -10743,7 +7919,7 @@ fn test_problematic_microblocks_are_not_mined() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(..) = &parsed.payload { - assert_ne!(parsed.txid(), tx_high_txid); + assert!(parsed.txid() != tx_high_txid); } } } @@ -10761,12 +7937,12 @@ fn test_problematic_microblocks_are_not_mined() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - // Do not unwrap as we may just be slow + // Do not unwrap in case we were just slow let _ = wait_for(300, || { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) }); @@ -10812,12 +7988,12 @@ fn test_problematic_microblocks_are_not_mined() { // TODO: test in epoch 2.1 with parser_v2 #[test] #[ignore] -fn test_problematic_microblocks_are_not_relayed_or_stored() { +fn test_problematic_blocks_are_not_relayed_or_stored() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; + let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; if fs::metadata(bad_blocks_dir).is_ok() { fs::remove_dir_all(bad_blocks_dir).unwrap(); } @@ -10879,14 +8055,6 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // AST precheck becomes default at burn height conf.burnchain.ast_precheck_size_height = Some(210); - // mine microblocks - conf.node.mine_microblocks = true; - conf.node.microblock_frequency = 1_000; - conf.miner.microblock_attempt_time_ms = 1_000; - conf.node.wait_time_for_microblocks = 0; - - conf.connection_options.inv_sync_interval = 3; - test_observer::spawn(); test_observer::register_any(&mut conf); @@ -10904,7 +8072,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let tx_exceeds = 
make_contract_publish_microblock_only( + let tx_exceeds = make_contract_publish( &spender_sk_2, 0, (tx_exceeds_body.len() * 100) as u64, @@ -10916,13 +8084,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .unwrap() .txid(); - // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); - let tx_high = make_contract_publish_microblock_only( + let tx_high = make_contract_publish( &spender_sk_3, 0, (tx_high_body.len() * 100) as u64, @@ -10972,26 +8139,23 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); all_new_files.append(&mut new_files); cur_files = cur_files_new; - - // give the microblock miner a chance - sleep_ms(5_000); } - // microblocks and blocks were all processed - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height == old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for microblocks to be processed"); + let tip_info = get_chain_info(&conf); - // no microblocks considered problematic + // blocks were all processed + assert_eq!( + tip_info.stacks_tip_height, + old_tip_info.stacks_tip_height + 5 + ); + // no blocks considered problematic assert!(all_new_files.is_empty()); - // one microblock contained tx_exceeds - let microblocks = test_observer::get_microblocks(); + // one block contained tx_exceeds + let blocks = test_observer::get_blocks(); let mut found = false; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -11023,13 +8187,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { btc_regtest_controller.build_next_block(1); // wait for runloop to advance - wait_for(30, || { + loop { + sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - Ok(new_tip.block_height > tip.block_height) - }) - .expect("Failed waiting for runloop to advance"); - + if new_tip.block_height > tip.block_height { + break; + } + } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -11063,7 +8228,6 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); @@ -11075,7 +8239,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { eprintln!("old_tip_info = {old_tip_info:?}"); - // mine some blocks, and log problematic microblocks + // mine some blocks, and log problematic blocks for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); @@ -11093,28 +8257,25 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // we reverted to the old rules (but the follower won't) assert_eq!(cur_ast_rules, ASTRules::Typical); - - // give the microblock miner a chance - sleep_ms(5_000); } - // sleep a little longer before checking tip info; this should help with test flakiness - wait_for(30, || { - let tip_info = get_chain_info(&conf); - Ok(tip_info.stacks_tip_height >= old_tip_info.stacks_tip_height + 5) - }) - .expect("Failed waiting for microblocks to be processed"); + let tip_info = get_chain_info(&conf); - // at least one was problematic. - // the miner might make multiple microblocks (only some of which are confirmed), so also check - // the event observer to see that we actually picked up tx_high - assert!(!all_new_files.is_empty()); + // at least one block was mined (hard to say how many due to the raciness between the burnchain + // downloader and this thread). + info!( + "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", + tip_info.stacks_tip_height, old_tip_info.stacks_tip_height + ); + assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); + // one was problematic -- i.e. the one that included tx_high + assert_eq!(all_new_files.len(), 1); // tx_high got mined by the miner - let microblocks = test_observer::get_microblocks(); - let mut bad_block_id = None; - for microblock in microblocks { - let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); + let blocks = test_observer::get_blocks(); + let mut bad_block_height = None; + for block in blocks { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -11124,26 +8285,13 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(..) 
= &parsed.payload { if parsed.txid() == tx_high_txid { - bad_block_id = { - let parts: Vec<_> = microblock - .get("parent_index_block_hash") - .unwrap() - .as_str() - .unwrap() - .split("0x") - .collect(); - let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{bad_block_id_hex}'"); - Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) - }; + bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap()); } } } } - assert!(bad_block_id.is_some()); - let bad_block_id = bad_block_id.unwrap(); - let bad_block = get_block(&http_origin, &bad_block_id).unwrap(); - let bad_block_height = bad_block.header.total_work.work; + assert!(bad_block_height.is_some()); + let bad_block_height = bad_block_height.unwrap(); // follower should not process bad_block_height or higher let new_tip_info = get_chain_info(&conf); @@ -11159,15 +8307,20 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_conf.node.p2p_bind, follower_conf.node.rpc_bind ); - // Do not unwrap as we may just be slow - let _ = wait_for(300, || { + let deadline = get_epoch_time_secs() + 300; + while get_epoch_time_secs() < deadline { let follower_tip_info = get_chain_info(&follower_conf); + if follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height + || follower_tip_info.stacks_tip_height + 1 == bad_block_height + { + break; + } eprintln!( - "\nFollower is at burn block {} stacks block {}\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - Ok(follower_tip_info.stacks_tip_height == new_tip_info.stacks_tip_height) - }); + sleep_ms(1000); + } // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); @@ -11176,15 +8329,15 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { num_download_passes + 5 ); - wait_for(30, || { - let download_passes = pox_sync_comms.get_download_passes(); + while num_download_passes + 5 > pox_sync_comms.get_download_passes() { + sleep_ms(1000); eprintln!( - "\nFollower has performed {download_passes} download passes; wait for {}\n", + "\nFollower has performed {} download passes; wait for {}\n", + pox_sync_comms.get_download_passes(), num_download_passes + 5 ); - Ok(download_passes >= num_download_passes + 5) - }) - .expect("Failed waiting for follower to perform enough download passes"); + } + eprintln!( "\nFollower has performed {} download passes\n", pox_sync_comms.get_download_passes() @@ -11196,8 +8349,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); - // follower rejects the bad microblock -- can't append subsequent blocks - assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height); + // follower rejects the bad block + assert_eq!(follower_tip_info.stacks_tip_height, bad_block_height - 1); test_observer::clear(); channel.stop_chains_coordinator(); @@ -11727,114 +8880,6 @@ fn test_competing_miners_build_anchor_blocks_on_same_chain_without_rbf() { test_competing_miners_build_on_same_chain(5, conf, false, 10_000, TxChainStrategy::Expensive) } -// TODO: this needs to run as a smoke test, since they take too long to run in CI -#[test] -#[ignore] -fn test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain() { - let (mut conf, _) = 
neon_integration_test_conf(); - - conf.node.mine_microblocks = true; - conf.miner.microblock_attempt_time_ms = 2_000; - conf.node.wait_time_for_microblocks = 0; - conf.node.microblock_frequency = 0; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_blocks = 1_000; - - test_competing_miners_build_on_same_chain(5, conf, true, 15_000, TxChainStrategy::Random) -} - -#[test] -#[ignore] -fn microblock_miner_multiple_attempts() { - let (mut conf, miner_account) = neon_integration_test_conf(); - let chain_id = conf.burnchain.chain_id; - - conf.node.mine_microblocks = true; - conf.miner.microblock_attempt_time_ms = 2_000; - conf.node.wait_time_for_microblocks = 100; - conf.node.microblock_frequency = 100; - conf.miner.first_attempt_time_ms = 2_000; - conf.miner.subsequent_attempt_time_ms = 5_000; - conf.burnchain.max_rbf = 1000000; - conf.node.wait_time_for_blocks = 1_000; - - let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::random()).collect(); - let balances: Vec<_> = privks - .iter() - .map(|privk| { - let addr = to_addr(privk); - InitialBalance { - address: addr.into(), - amount: 1_000_000_000, - } - }) - .collect(); - - conf.initial_balances = balances; - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's query the miner's account nonce: - - let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {account:?}"); - - let all_txs: Vec<_> = privks - .iter() - .enumerate() - .map(|(i, pk)| make_mblock_tx_chain(pk, (25 * i) as u64, chain_id)) - .collect(); - - let _handle = thread::spawn(move || { - for (i, txi) in all_txs.iter().enumerate() { - for (j, tx) in txi.iter().enumerate() { - eprintln!("\n\nSubmit tx {i},{j}\n\n"); - submit_tx(&http_origin, tx); - sleep_ms(1_000); - } - } - }); - - for _i in 0..10 { - sleep_ms(30_000); - btc_regtest_controller.build_next_block(1); - } - - channel.stop_chains_coordinator(); -} - #[test] #[ignore] fn min_txs() { From 967b6d1674756374c441fd8a5d29eb7d2d221334 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 12 Mar 2025 17:22:56 -0400 Subject: [PATCH 110/238] feat: improve `NextNonceWithHighestFeeRate` algorithm When the query runs out of transactions, flush the nonces to the DB and run the query again until we get no more candidates or we hit some other exit condition. 
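
In rough outline, the new control flow is the loop sketched below. This is a minimal, self-contained model for illustration only: `run_query_pass`, `StopReason`, and the toy `NonceCache` here stand in for the real ranked-candidate query, `MempoolIterationStopReason`, and nonce cache, and are not the actual stackslib API.

    // Requery loop: keep re-running the candidate query, flushing cached
    // nonces to the DB between passes, until a pass makes no progress or
    // stops for some other reason (deadline, early exit from the evaluator).
    #[derive(Debug, PartialEq)]
    enum StopReason {
        NoMoreCandidates,
        DeadlineReached,
    }

    struct NonceCache {
        dirty: Vec<(String, u64)>,
    }

    impl NonceCache {
        // Stand-in for flushing cached nonces to the DB so the next query
        // pass can filter on them.
        fn flush(&mut self) {
            self.dirty.clear();
        }
    }

    // Stand-in for one full pass over the ranked candidates: returns why the
    // pass stopped and whether it actually considered any transaction.
    fn run_query_pass(pass: &mut u32) -> (StopReason, bool) {
        *pass += 1;
        (StopReason::NoMoreCandidates, *pass < 3)
    }

    fn main() {
        let mut cache = NonceCache {
            dirty: vec![("some-origin-address".to_string(), 1)],
        };
        let mut pass = 0;
        let stop_reason = loop {
            let (reason, state_changed) = run_query_pass(&mut pass);
            // Stop on a deadline or any other exit condition, or when a full
            // pass over the candidates made no progress.
            if reason != StopReason::NoMoreCandidates || !state_changed {
                break reason;
            }
            // Otherwise flush the nonce cache to the DB and query again.
            cache.flush();
        };
        println!("stopped after {pass} passes: {stop_reason:?}");
    }

Termination follows from the same shape in the real code: a requery is only attempted after a pass that changed state, and a pass over the finite candidate set that changes nothing breaks out with `NoMoreCandidates`.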
--- stackslib/src/chainstate/stacks/miner.rs | 14 +- stackslib/src/core/mempool.rs | 405 ++++++++++++----------- stackslib/src/core/nonce_cache.rs | 1 + 3 files changed, 224 insertions(+), 196 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 0cd782a2210..f6515d2ffdf 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2315,7 +2315,7 @@ impl StacksBlockBuilder { blocked = (*settings.miner_status.lock().expect("FATAL: mutex poisoned")) .is_blocked(); if blocked { - debug!("Miner stopping due to preemption"); + info!("Miner stopping due to preemption"); return Ok(None); } @@ -2323,16 +2323,20 @@ impl StacksBlockBuilder { let update_estimator = to_consider.update_estimate; if block_limit_hit == BlockLimitFunction::LIMIT_REACHED { + info!("Miner stopping due to limit reached"); return Ok(None); } let time_now = get_epoch_time_ms(); if time_now >= deadline { - debug!("Miner mining time exceeded ({} ms)", max_miner_time_ms); + info!( + "Miner stopping due to mining time exceeded ({} ms)", + max_miner_time_ms + ); return Ok(None); } if let Some(time_estimate) = txinfo.metadata.time_estimate_ms { if time_now.saturating_add(time_estimate.into()) > deadline { - debug!("Mining tx would cause us to exceed our deadline, skipping"; + info!("Mining tx would cause us to exceed our deadline, skipping"; "txid" => %txinfo.tx.txid(), "deadline" => deadline, "now" => time_now, @@ -2440,9 +2444,7 @@ impl StacksBlockBuilder { } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT { - debug!( - "Stop mining anchored block due to limit exceeded" - ); + info!("Miner stopping due to limit reached"); block_limit_hit = BlockLimitFunction::LIMIT_REACHED; return Ok(None); } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index a1eaabdbc61..d3b83403050 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -146,8 +146,11 @@ pub enum MemPoolSyncData { TxTags([u8; 32], Vec), } +#[derive(Debug, PartialEq)] pub enum MempoolIterationStopReason { + /// No more candidates in the mempool to consider NoMoreCandidates, + /// The mining deadline has been reached DeadlineReached, /// If the iteration function supplied to mempool iteration exited /// (i.e., the transaction evaluator returned an early exit command) @@ -1039,7 +1042,7 @@ impl<'a> MemPoolTx<'a> { } } -#[cfg(test)] +#[cfg(any(test, feature = "testing"))] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; let mut stmt = conn.prepare(sql).map_err(db_error::SqliteError)?; @@ -1523,25 +1526,28 @@ impl MemPoolDB { .query(NO_PARAMS) .map_err(Error::SqliteError)?; - // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy - // - // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes - // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: - // - // 1. Filters out transactions to consider only those that have the next expected nonce for both the origin and sponsor, - // when possible - // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate - // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable - // order - // 3. 
Ranks transactions by prioritizing those with next nonces and higher fees (per origin and sponsor address) - // 4. Takes the top ranked transaction and returns it for evaluation - // - // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated - // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large - // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. This query - // also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked according to - // their origin address nonce. - let sql = " + let stop_reason = loop { + let mut state_changed = false; + + // == Query for `NextNonceWithHighestFeeRate` mempool walk strategy + // + // Selects the next mempool transaction to consider using a heuristic that maximizes miner fee profitability and minimizes + // CPU time wasted on already-mined or not-yet-mineable transactions. This heuristic takes the following steps: + // + // 1. Filters out transactions to consider only those that have the next expected nonce for both the origin and sponsor, + // when possible + // 2. Adds a "simulated" fee rate to transactions that don't have it by multiplying the mempool's maximum current fee rate + // by a random number. This helps us mix these transactions with others to guarantee they get processed in a reasonable + // order + // 3. Ranks transactions by prioritizing those with next nonces and higher fees (per origin and sponsor address) + // 4. Takes the top ranked transaction and returns it for evaluation + // + // This logic prevents miners from repeatedly visiting (and then skipping) high fee transactions that would get evaluated + // first based on their `fee_rate` but are otherwise non-mineable because they have very high or invalid nonces. A large + // volume of these transactions would cause considerable slowness when selecting valid transactions to mine. This query + // also makes sure transactions that have NULL `fee_rate`s are visited, because they will also get ranked according to + // their origin address nonce. 
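+            //
+            // For example: if origin address A has pending transactions with nonces 5 and 6 and
+            // origin address B has one with nonce 2, then A's nonce-5 and B's nonce-2 transactions
+            // each get rank 1 within their own partition and compete on (possibly simulated) fee
+            // rate, while A's nonce-6 transaction sits at rank 2 regardless of how high its fee is.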
+ let sql = " WITH nonce_filtered AS ( SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate, CASE @@ -1570,164 +1576,166 @@ impl MemPoolDB { FROM address_nonce_ranked ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC "; - let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; - let mut nonce_rank_iterator = query_stmt_nonce_rank - .query(NO_PARAMS) - .map_err(Error::SqliteError)?; - - let stop_reason = loop { - if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { - debug!("Mempool iteration deadline exceeded"; + let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; + let mut nonce_rank_iterator = query_stmt_nonce_rank + .query(NO_PARAMS) + .map_err(Error::SqliteError)?; + + let stop_reason = loop { + if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { + debug!("Mempool: iteration deadline exceeded"; "deadline_ms" => settings.max_walk_time_ms); - break MempoolIterationStopReason::DeadlineReached; - } + break MempoolIterationStopReason::DeadlineReached; + } - // First, try to read from the retry list - let (candidate, update_estimate) = match settings.strategy { - MemPoolWalkStrategy::GlobalFeeRate => { - let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) - < settings.consider_no_estimate_tx_prob; - // randomly select from either the null fee-rate transactions or those with fee-rate estimates. - let opt_tx = if start_with_no_estimate { - null_iterator.next().map_err(Error::SqliteError)? - } else { - fee_iterator.next().map_err(Error::SqliteError)? - }; - match opt_tx { - Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), - None => { - // If the selected iterator is empty, check the other - match if start_with_no_estimate { - fee_iterator.next().map_err(Error::SqliteError)? - } else { - null_iterator.next().map_err(Error::SqliteError)? - } { - Some(row) => ( - MemPoolTxInfoPartial::from_row(row)?, - !start_with_no_estimate, - ), - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; + // First, try to read from the retry list + let (candidate, update_estimate) = match settings.strategy { + MemPoolWalkStrategy::GlobalFeeRate => { + let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) + < settings.consider_no_estimate_tx_prob; + // randomly select from either the null fee-rate transactions or those with fee-rate estimates. + let opt_tx = if start_with_no_estimate { + null_iterator.next().map_err(Error::SqliteError)? + } else { + fee_iterator.next().map_err(Error::SqliteError)? + }; + match opt_tx { + Some(row) => { + (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate) + } + None => { + // If the selected iterator is empty, check the other + match if start_with_no_estimate { + fee_iterator.next().map_err(Error::SqliteError)? + } else { + null_iterator.next().map_err(Error::SqliteError)? + } { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + !start_with_no_estimate, + ), + None => { + break MempoolIterationStopReason::NoMoreCandidates; + } } } } } - } - MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { - match nonce_rank_iterator.next().map_err(Error::SqliteError)? 
{ - Some(row) => { - let tx = MemPoolTxInfoPartial::from_row(row)?; - let update_estimate = tx.fee_rate.is_none(); - (tx, update_estimate) - } - None => { - debug!("No more transactions to consider in mempool"); - break MempoolIterationStopReason::NoMoreCandidates; + MemPoolWalkStrategy::NextNonceWithHighestFeeRate => { + match nonce_rank_iterator.next().map_err(Error::SqliteError)? { + Some(row) => { + let tx = MemPoolTxInfoPartial::from_row(row)?; + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) + } + None => { + break MempoolIterationStopReason::NoMoreCandidates; + } } } - } - }; + }; - // Check the nonces. - let mut nonce_conn = self.reopen(false)?; - let expected_origin_nonce = - nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn); - let expected_sponsor_nonce = - nonce_cache.get(&candidate.sponsor_address, clarity_tx, &mut nonce_conn); - - match order_nonces( - candidate.origin_nonce, - expected_origin_nonce, - candidate.sponsor_nonce, - expected_sponsor_nonce, - ) { - Ordering::Less => { - debug!( - "Mempool: unexecutable: drop tx"; - "txid" => %candidate.txid, - "tx_origin_addr" => %candidate.origin_address, - "tx_origin_nonce" => candidate.origin_nonce, - "fee_rate" => candidate.fee_rate.unwrap_or_default(), - "expected_origin_nonce" => expected_origin_nonce, - "expected_sponsor_nonce" => expected_sponsor_nonce, - ); - // This transaction cannot execute in this pass, just drop it - continue; - } - Ordering::Greater => { - debug!( - "Mempool: nonces too high"; - "txid" => %candidate.txid, - "tx_origin_addr" => %candidate.origin_address, - "tx_origin_nonce" => candidate.origin_nonce, - "fee_rate" => candidate.fee_rate.unwrap_or_default(), - "expected_origin_nonce" => expected_origin_nonce, - "expected_sponsor_nonce" => expected_sponsor_nonce, - ); - continue; - } - Ordering::Equal => { - // Candidate transaction: fall through - } - }; + state_changed = true; + + // Check the nonces. + let mut nonce_conn = self.reopen(false)?; + let expected_origin_nonce = + nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn); + let expected_sponsor_nonce = + nonce_cache.get(&candidate.sponsor_address, clarity_tx, &mut nonce_conn); + + match order_nonces( + candidate.origin_nonce, + expected_origin_nonce, + candidate.sponsor_nonce, + expected_sponsor_nonce, + ) { + Ordering::Less => { + debug!( + "Mempool: unexecutable: drop tx"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, + ); + // This transaction cannot execute in this pass, just drop it + continue; + } + Ordering::Greater => { + debug!( + "Mempool: nonces too high"; + "txid" => %candidate.txid, + "tx_origin_addr" => %candidate.origin_address, + "tx_origin_nonce" => candidate.origin_nonce, + "fee_rate" => candidate.fee_rate.unwrap_or_default(), + "expected_origin_nonce" => expected_origin_nonce, + "expected_sponsor_nonce" => expected_sponsor_nonce, + ); + continue; + } + Ordering::Equal => { + // Candidate transaction: fall through + } + }; - // Read in and deserialize the transaction. - let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?; - let tx_info = match tx_info_option { - Some(tx) => tx, - None => { - // Note: Don't panic here because maybe the state has changed from garbage collection. 
- warn!("Miner: could not find a tx for id {:?}", &candidate.txid); - continue; - } - }; + // Read in and deserialize the transaction. + let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?; + let tx_info = match tx_info_option { + Some(tx) => tx, + None => { + // Note: Don't panic here because maybe the state has changed from garbage collection. + warn!("Miner: could not find a tx for id {:?}", &candidate.txid); + continue; + } + }; - let (tx_type, do_consider) = match &tx_info.tx.payload { - TransactionPayload::TokenTransfer(..) => ( - "TokenTransfer".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::TokenTransfer), - ), - TransactionPayload::SmartContract(..) => ( - "SmartContract".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::SmartContract), - ), - TransactionPayload::ContractCall(..) => ( - "ContractCall".to_string(), - settings - .txs_to_consider - .contains(&MemPoolWalkTxTypes::ContractCall), - ), - _ => ("".to_string(), true), - }; - if !do_consider { - debug!("Will skip mempool tx, since it does not have an acceptable type"; + let (tx_type, do_consider) = match &tx_info.tx.payload { + TransactionPayload::TokenTransfer(..) => ( + "TokenTransfer".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::TokenTransfer), + ), + TransactionPayload::SmartContract(..) => ( + "SmartContract".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::SmartContract), + ), + TransactionPayload::ContractCall(..) => ( + "ContractCall".to_string(), + settings + .txs_to_consider + .contains(&MemPoolWalkTxTypes::ContractCall), + ), + _ => ("".to_string(), true), + }; + if !do_consider { + debug!("Mempool: will skip tx, since it does not have an acceptable type"; "txid" => %tx_info.tx.txid(), "type" => %tx_type); - continue; - } + continue; + } - let do_consider = settings.filter_origins.is_empty() - || settings - .filter_origins - .contains(&tx_info.metadata.origin_address); + let do_consider = settings.filter_origins.is_empty() + || settings + .filter_origins + .contains(&tx_info.metadata.origin_address); - if !do_consider { - debug!("Will skip mempool tx, since it does not have an allowed origin"; + if !do_consider { + debug!("Mempool: will skip tx, since it does not have an allowed origin"; "txid" => %tx_info.tx.txid(), "origin" => %tx_info.metadata.origin_address); - continue; - } + continue; + } - let consider = ConsiderTransaction { - tx: tx_info, - update_estimate, - }; - debug!("Consider mempool transaction"; + let consider = ConsiderTransaction { + tx: tx_info, + update_estimate, + }; + debug!("Mempool: consider transaction"; "txid" => %consider.tx.tx.txid(), "origin_addr" => %consider.tx.metadata.origin_address, "origin_nonce" => candidate.origin_nonce, @@ -1737,41 +1745,59 @@ impl MemPoolDB { "tx_fee" => consider.tx.metadata.tx_fee, "fee_rate" => candidate.fee_rate, "size" => consider.tx.metadata.len); - total_considered += 1; - - // Run `todo` on the transaction. - match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? { - Some(tx_event) => { - match tx_event { - TransactionEvent::Success(_) => { - // Bump nonces in the cache for the executed transaction - nonce_cache.set( - consider.tx.metadata.origin_address, - expected_origin_nonce + 1, - &mut nonce_conn, - ); - if consider.tx.tx.auth.is_sponsored() { + total_considered += 1; + + // Run `todo` on the transaction. + match todo(clarity_tx, &consider, self.cost_estimator.as_mut())? 
{
+                    Some(tx_event) => {
+                        match tx_event {
+                            TransactionEvent::Success(_) => {
+                                // Bump nonces in the cache for the executed transaction
                                 nonce_cache.set(
-                                    consider.tx.metadata.sponsor_address,
-                                    expected_sponsor_nonce + 1,
+                                    consider.tx.metadata.origin_address,
+                                    expected_origin_nonce + 1,
                                     &mut nonce_conn,
                                 );
+                                if consider.tx.tx.auth.is_sponsored() {
+                                    nonce_cache.set(
+                                        consider.tx.metadata.sponsor_address,
+                                        expected_sponsor_nonce + 1,
+                                        &mut nonce_conn,
+                                    );
+                                }
+                                output_events.push(tx_event);
+                            }
+                            TransactionEvent::Skipped(_) => {
+                                // don't push `Skipped` events to the observer
+                            }
+                            _ => {
+                                output_events.push(tx_event);
                             }
-                            output_events.push(tx_event);
-                        }
-                        TransactionEvent::Skipped(_) => {
-                            // don't push `Skipped` events to the observer
-                        }
-                        _ => {
-                            output_events.push(tx_event);
                         }
                     }
+                    None => {
+                        debug!("Mempool: early exit from iterator");
+                        break MempoolIterationStopReason::IteratorExited;
+                    }
                 }
-                None => {
-                    debug!("Mempool iteration early exit from iterator");
-                    break MempoolIterationStopReason::IteratorExited;
+            };
+
+            // If we've reached the end of the mempool, or if we've stopped
+            // iterating for some other reason, break out of the loop
+            if settings.strategy != MemPoolWalkStrategy::NextNonceWithHighestFeeRate
+                || stop_reason != MempoolIterationStopReason::NoMoreCandidates
+                || !state_changed
+            {
+                if stop_reason == MempoolIterationStopReason::NoMoreCandidates {
+                    info!("Mempool: no more transactions to consider");
                 }
+                break stop_reason;
             }
+
+            // Flush the nonce cache to the database before performing the next
+            // query.
+            let mut nonce_conn = self.reopen(true)?;
+            nonce_cache.flush(&mut nonce_conn);
         };
 
         // drop these rusqlite statements and queries, since their existence as immutable borrows on the
@@ -1781,16 +1807,15 @@ impl MemPoolDB {
         drop(query_stmt_null);
         drop(fee_iterator);
         drop(query_stmt_fee);
-        drop(nonce_rank_iterator);
-        drop(query_stmt_nonce_rank);
 
         // Write through the nonce cache to the database
         nonce_cache.flush(&mut self.db);
 
-        debug!(
+        info!(
             "Mempool iteration finished";
             "considered_txs" => u128::from(total_considered),
-            "elapsed_ms" => start_time.elapsed().as_millis()
+            "elapsed_ms" => start_time.elapsed().as_millis(),
+            "stop_reason" => ?stop_reason
         );
         Ok((total_considered, stop_reason))
     }
diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs
index 842f7825e72..e15ff36151e 100644
--- a/stackslib/src/core/nonce_cache.rs
+++ b/stackslib/src/core/nonce_cache.rs
@@ -165,6 +165,7 @@ impl NonceCache {
     }
 
     /// Flush the in-memory cache to the DB.
+    /// Do not return until successful. 
pub fn flush(&mut self, conn: &mut DBConn) { self.flush_with_evicted(conn, None) } From 892e451d573e3994364ee3a2771913037d3b111f Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 13 Mar 2025 13:27:22 +0100 Subject: [PATCH 111/238] fix: move mock_mining check to get_utxos() caller while preserving behaviour (returning error), #5841 --- .../src/burnchains/bitcoin_regtest_controller.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 2bc98533be8..5cab6be2ada 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -744,11 +744,6 @@ impl BitcoinRegtestController { utxos_to_exclude: Option, block_height: u64, ) -> Option { - // if mock mining, do not even bother requesting UTXOs - if self.config.get_node_config(false).mock_mining { - return None; - } - let pubk = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { let mut p = *public_key; p.set_compressed(true); @@ -1693,6 +1688,11 @@ impl BitcoinRegtestController { // in RBF, you have to consume the same UTXOs utxos } else { + // if mock mining, do not even bother requesting UTXOs + if self.config.node.mock_mining { + return Err(BurnchainControllerError::NoUTXOs); + } + // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); match self.get_utxos( From f0eeb42d4327d1cc6a968bc2828c474ad5269cfa Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 13 Mar 2025 13:36:12 +0100 Subject: [PATCH 112/238] docs: update changelog, #5841 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 193ae1ebe24..9718c61bd67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - When a miner times out waiting for signatures, it will re-propose the same block instead of building a new block ([#5877](https://github.com/stacks-network/stacks-core/pull/5877)) - Improve tenure downloader trace verbosity applying proper logging level depending on the tenure state ("debug" if unconfirmed, "info" otherwise) ([#5871](https://github.com/stacks-network/stacks-core/issues/5871)) +- Remove warning log about missing UTXOs when a node is configured as `miner` with `mock_mining` mode enabled ([#5841](https://github.com/stacks-network/stacks-core/issues/5841)) ## [3.1.0.0.7] From 65a685eb36137b562fccd2668a0ba33ac1817903 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Thu, 13 Mar 2025 06:31:43 -0700 Subject: [PATCH 113/238] Removing epoch_25 mod for removed tests --- testnet/stacks-node/src/tests/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index a4546d231b7..d9ed03b7699 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -58,7 +58,6 @@ mod epoch_21; mod epoch_22; mod epoch_23; mod epoch_24; -mod epoch_25; mod integrations; mod mempool; pub mod nakamoto_integrations; From 4ea5e7a5551fcd36bccf49511d5c0816b328d8da Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 13 Mar 2025 16:15:29 +0100 Subject: [PATCH 114/238] added StateMachineUpdate SignerMessage --- libsigner/src/v0/messages.rs | 68 ++++++++++++++++++- .../src/nakamoto_node/stackerdb_listener.rs | 3 + 2 files 
changed, 68 insertions(+), 3 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index e9234cf5e4c..345779ff635 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -61,7 +61,8 @@ use stacks_common::codec::{ StacksMessageCodec, }; use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; @@ -122,7 +123,9 @@ SignerMessageTypePrefix { /// Mock block signature message from Epoch 2.5 signers MockSignature = 4, /// Mock block message from Epoch 2.5 miners - MockBlock = 5 + MockBlock = 5, + /// State machine update + StateMachineUpdate = 6 }); #[cfg_attr(test, mutants::skip)] @@ -168,6 +171,7 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::MockProposal(_) => SignerMessageTypePrefix::MockProposal, SignerMessage::MockSignature(_) => SignerMessageTypePrefix::MockSignature, SignerMessage::MockBlock(_) => SignerMessageTypePrefix::MockBlock, + SignerMessage::StateMachineUpdate(_) => SignerMessageTypePrefix::StateMachineUpdate, } } } @@ -187,6 +191,8 @@ pub enum SignerMessage { MockProposal(MockProposal), /// A mock block from the epoch 2.5 miners MockBlock(MockBlock), + /// A state machine update + StateMachineUpdate(StateMachineUpdate), } impl SignerMessage { @@ -199,7 +205,8 @@ impl SignerMessage { Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockProposal(_) - | Self::MockBlock(_) => None, + | Self::MockBlock(_) + | Self::StateMachineUpdate(_) => None, Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature uses the same slot as block response since its exclusively for epoch 2.5 testing } } @@ -217,6 +224,9 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::MockSignature(signature) => signature.consensus_serialize(fd), SignerMessage::MockProposal(message) => message.consensus_serialize(fd), SignerMessage::MockBlock(block) => block.consensus_serialize(fd), + SignerMessage::StateMachineUpdate(state_machine_update) => { + state_machine_update.consensus_serialize(fd) + } }?; Ok(()) } @@ -250,6 +260,10 @@ impl StacksMessageCodec for SignerMessage { let block = StacksMessageCodec::consensus_deserialize(fd)?; SignerMessage::MockBlock(block) } + SignerMessageTypePrefix::StateMachineUpdate => { + let state_machine_update = StacksMessageCodec::consensus_deserialize(fd)?; + SignerMessage::StateMachineUpdate(state_machine_update) + } }; Ok(message) } @@ -525,6 +539,54 @@ impl StacksMessageCodec for MockBlock { } } +/// Message for update the Signer State infos +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct StateMachineUpdate { + burn_block: ConsensusHash, + burn_block_height: u64, + current_miner_pkh: Hash160, + parent_tenure_id: ConsensusHash, + parent_tenure_last_block: StacksBlockId, + parent_tenure_last_block_height: u64, + active_signer_protocol_version: u64, + local_supported_signer_protocol_version: u64, +} + +impl StacksMessageCodec for StateMachineUpdate { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.burn_block)?; + write_next(fd, &self.burn_block_height)?; + write_next(fd, &self.current_miner_pkh)?; + write_next(fd, &self.parent_tenure_id)?; + write_next(fd, &self.parent_tenure_last_block)?; + 
write_next(fd, &self.parent_tenure_last_block_height)?; + write_next(fd, &self.active_signer_protocol_version)?; + write_next(fd, &self.local_supported_signer_protocol_version)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let burn_block = read_next::(fd)?; + let burn_block_height = read_next::(fd)?; + let current_miner_pkh = read_next::(fd)?; + let parent_tenure_id = read_next::(fd)?; + let parent_tenure_last_block = read_next::(fd)?; + let parent_tenure_last_block_height = read_next::(fd)?; + let active_signer_protocol_version = read_next::(fd)?; + let local_supported_signer_protocol_version = read_next::(fd)?; + Ok(Self { + burn_block, + burn_block_height, + current_miner_pkh, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + active_signer_protocol_version, + local_supported_signer_protocol_version, + }) + } +} + define_u8_enum!( /// Enum representing the reject code type prefix RejectCodeTypePrefix { diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index a0225bc7efd..42811cd7842 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -440,6 +440,9 @@ impl StackerDBListener { | SignerMessageV0::MockBlock(_) => { debug!("Received mock message. Ignoring."); } + SignerMessageV0::StateMachineUpdate(_) => { + debug!("Received state machine update message. Ignoring."); + } }; } } From c87ade363ac81b2d6cb4a2bac636debd4508694a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 13 Mar 2025 16:27:55 +0100 Subject: [PATCH 115/238] MessageSlotID::StateMachineUpdate --- libsigner/src/v0/messages.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 345779ff635..5aa1e98009c 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -82,7 +82,9 @@ define_u8_enum!( /// the contract index in the signers contracts (i.e., X in signers-0-X) MessageSlotID { /// Block Response message from signers - BlockResponse = 1 + BlockResponse = 1, + /// Signer State Machine Update + StateMachineUpdate = 2 }); define_u8_enum!( @@ -205,9 +207,9 @@ impl SignerMessage { Self::BlockProposal(_) | Self::BlockPushed(_) | Self::MockProposal(_) - | Self::MockBlock(_) - | Self::StateMachineUpdate(_) => None, + | Self::MockBlock(_) => None, Self::BlockResponse(_) | Self::MockSignature(_) => Some(MessageSlotID::BlockResponse), // Mock signature uses the same slot as block response since its exclusively for epoch 2.5 testing + Self::StateMachineUpdate(_) => Some(MessageSlotID::StateMachineUpdate), } } } From 5ddd4fc455fe51e136544f01c08d1e8d487b6fae Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Thu, 13 Mar 2025 10:28:56 -0700 Subject: [PATCH 116/238] Remove unnecessary lines - originally proposed by `0xpessimist` in stacks-network/stacks-core#5879 via stacks-network/docs#1710 --- clarity/src/vm/docs/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index a92b4fdfdb4..d47057b29cf 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -499,7 +499,6 @@ Note: Corner cases are handled with the following rules: * if both `i1` and `i2` are `0`, return `1` * if `i1` is `1`, return `1` * if `i1` is `0`, return `0` - * if `i2` is `1`, return `i1` * if `i2` is negative or greater 
than `u32::MAX`, throw a runtime error", example: "(pow 2 3) ;; Returns 8 (pow 2 2) ;; Returns 4 From 500c434b4d9e5e9112edbcbaf3c38c74975f6c51 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Thu, 13 Mar 2025 13:31:15 -0700 Subject: [PATCH 117/238] Remove fn make_mblock_tx_chain --- .../src/tests/neon_integrations.rs | 32 ------------------- 1 file changed, 32 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5111393dd8c..3d912fbe5b8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8614,38 +8614,6 @@ pub fn make_random_tx_chain( chain } -fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(privk); - let mut chain = vec![]; - - for nonce in 0..25 { - // N.B. private keys are 32-33 bytes, so this is always safe - let random_iters = privk.to_bytes()[nonce as usize] as usize; - - let be_bytes = [ - privk.to_bytes()[nonce as usize], - privk.to_bytes()[(nonce + 1) as usize], - ]; - - let random_extra_fee = u16::from_be_bytes(be_bytes) as u64; - - let mut addr_prefix = addr.to_string(); - let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); - eprintln!("Make tx {contract_name}"); - let tx = make_contract_publish_microblock_only( - privk, - nonce, - 1049230 + nonce + fee_plus + random_extra_fee, - chain_id, - &contract_name, - &make_runtime_sized_contract(1, nonce, &addr_prefix), - ); - chain.push(tx); - } - chain -} - fn test_competing_miners_build_on_same_chain( num_miners: usize, conf_template: Config, From ad3dc03080a9421d56bc390c6ff8fa386fdcb0b5 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 13 Mar 2025 16:54:03 -0700 Subject: [PATCH 118/238] feat: issue RBF when burnchain/miner config changed --- stackslib/src/config/mod.rs | 2 +- testnet/stacks-node/src/globals.rs | 28 +++- .../stacks-node/src/nakamoto_node/relayer.rs | 88 ++++++++---- .../src/tests/nakamoto_integrations.rs | 136 ++++++++++++++++++ 4 files changed, 228 insertions(+), 26 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 937c90ebdc8..56a90bafef7 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1197,7 +1197,7 @@ impl std::default::Default for Config { } } -#[derive(Clone, Debug, Default, Deserialize)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq)] pub struct BurnchainConfig { pub chain: String, pub mode: String, diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b70913c581e..fc8617b5f04 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -10,7 +10,7 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::MinerStatus; -use stacks::config::MinerConfig; +use stacks::config::{BurnchainConfig, MinerConfig}; use stacks::net::NetworkResult; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, ConsensusHash}; @@ -63,6 +63,8 @@ pub struct Globals { pub leader_key_registration_state: Arc>, /// Last miner config loaded last_miner_config: Arc>>, + /// Last burnchain config + last_burnchain_config: Arc>>, /// Last miner spend amount last_miner_spend_amount: Arc>>, /// 
burnchain height at which we start mining @@ -93,6 +95,7 @@ impl Clone for Globals { should_keep_running: self.should_keep_running.clone(), leader_key_registration_state: self.leader_key_registration_state.clone(), last_miner_config: self.last_miner_config.clone(), + last_burnchain_config: self.last_burnchain_config.clone(), last_miner_spend_amount: self.last_miner_spend_amount.clone(), start_mining_height: self.start_mining_height.clone(), estimated_winning_probs: self.estimated_winning_probs.clone(), @@ -125,6 +128,7 @@ impl Globals { should_keep_running, leader_key_registration_state: Arc::new(Mutex::new(leader_key_registration_state)), last_miner_config: Arc::new(Mutex::new(None)), + last_burnchain_config: Arc::new(Mutex::new(None)), last_miner_spend_amount: Arc::new(Mutex::new(None)), start_mining_height: Arc::new(Mutex::new(start_mining_height)), estimated_winning_probs: Arc::new(Mutex::new(HashMap::new())), @@ -355,6 +359,28 @@ impl Globals { } } + /// Get the last burnchain config + pub fn get_last_burnchain_config(&self) -> Option { + match self.last_burnchain_config.lock() { + Ok(last_burnchain_config) => (*last_burnchain_config).clone(), + Err(_e) => { + error!("FATAL; failed to lock last burnchain config"); + panic!(); + } + } + } + + /// Set the last burnchain config + pub fn set_last_burnchain_config(&self, burnchain_config: BurnchainConfig) { + match self.last_burnchain_config.lock() { + Ok(ref mut last_burnchain_config) => **last_burnchain_config = Some(burnchain_config), + Err(_e) => { + error!("FATAL; failed to lock last burnchain config"); + panic!(); + } + } + } + /// Get the last miner spend amount pub fn get_last_miner_spend_amount(&self) -> Option { match self.last_miner_spend_amount.lock() { diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 60ec5f73796..f35079abde9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -42,6 +42,7 @@ use stacks::chainstate::stacks::miner::{ set_mining_spend_amount, signal_mining_blocked, signal_mining_ready, }; use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::config::BurnchainConfig; use stacks::core::mempool::MemPoolDB; use stacks::core::STACKS_EPOCH_3_1_MARKER; use stacks::monitoring::increment_stx_blocks_mined_counter; @@ -1101,29 +1102,7 @@ impl RelayerThread { return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); }; - let burnchain_config = self.config.get_burnchain_config(); - let last_miner_spend_opt = self.globals.get_last_miner_spend_amount(); - let force_remine = if let Some(last_miner_spend_amount) = last_miner_spend_opt { - last_miner_spend_amount != burnchain_config.burn_fee_cap - } else { - false - }; - if force_remine { - info!( - "Miner config changed; updating spend amount {}", - burnchain_config.burn_fee_cap - ); - } - - self.globals - .set_last_miner_spend_amount(burnchain_config.burn_fee_cap); - - set_mining_spend_amount( - self.globals.get_miner_status(), - burnchain_config.burn_fee_cap, - ); - // amount of burnchain tokens (e.g. 
sats) we'll spend across the PoX outputs - let burn_fee_cap = burnchain_config.burn_fee_cap; + let (_, burnchain_config) = self.check_burnchain_config_changed(); // let's commit, but target the current burnchain tip with our modulus so the commit is // only valid if it lands in the targeted burnchain block height @@ -1155,7 +1134,7 @@ impl RelayerThread { highest_tenure_start_block_header.index_block_hash().0, ), // the rest of this is the same as epoch2x commits, modulo the new epoch marker - burn_fee: burn_fee_cap, + burn_fee: burnchain_config.burn_fee_cap, apparent_sender: sender, key_block_ptr: u32::try_from(key.block_height) .expect("FATAL: burn block height exceeded u32"), @@ -1768,6 +1747,21 @@ impl RelayerThread { "burnchain view changed?" => %burnchain_changed, "highest tenure changed?" => %highest_tenure_changed); + // If the miner spend or config has changed, we want to RBF with new config values. + let (burnchain_config_changed, _) = self.check_burnchain_config_changed(); + let miner_config_changed = self.check_miner_config_changed(); + + if burnchain_config_changed || miner_config_changed { + info!("Miner spend or config changed; issuing block commit with new values"; + "miner_spend_changed" => %burnchain_config_changed, + "miner_config_changed" => %miner_config_changed, + ); + return Ok(Some(RelayerDirective::IssueBlockCommit( + stacks_tip_ch, + stacks_tip_bh, + ))); + } + if !burnchain_changed && !highest_tenure_changed { // nothing to do return Ok(None); @@ -2136,6 +2130,52 @@ impl RelayerThread { debug!("Relayer: handled directive"; "continue_running" => continue_running); continue_running } + + /// Reload config.burnchain to see if burn_fee_cap has changed. + /// If it has, update the miner spend amount and return true. + pub fn check_burnchain_config_changed(&self) -> (bool, BurnchainConfig) { + let burnchain_config = self.config.get_burnchain_config(); + let last_burnchain_config_opt = self.globals.get_last_burnchain_config(); + let burnchain_config_changed = + if let Some(last_burnchain_config) = last_burnchain_config_opt { + last_burnchain_config != burnchain_config + } else { + false + }; + if burnchain_config_changed { + info!( + "Burnchain config changed; updating spend amount {}", + burnchain_config.burn_fee_cap + ); + } + + self.globals + .set_last_miner_spend_amount(burnchain_config.burn_fee_cap); + + set_mining_spend_amount( + self.globals.get_miner_status(), + burnchain_config.burn_fee_cap, + ); + + (burnchain_config_changed, burnchain_config) + } + + pub fn check_miner_config_changed(&self) -> bool { + let miner_config = self.config.get_miner_config(); + let last_miner_config_opt = self.globals.get_last_miner_config(); + let miner_config_changed = if let Some(last_miner_config) = last_miner_config_opt { + last_miner_config != miner_config + } else { + false + }; + if miner_config_changed { + info!("Miner config changed; forcing a re-mine attempt"); + } + + self.globals.set_last_miner_config(miner_config); + + miner_config_changed + } } #[cfg(test)] diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 9d8f8dbf6b1..0917342f2bd 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11237,3 +11237,139 @@ fn reload_miner_config() { run_loop_thread.join().unwrap(); } + +/// Test that a new block commit is issued when the miner spend or config changes. +/// +/// The test boots into Nakamoto. 
Then, it waits for a block commit on the most recent +/// tip. The config is updated, and then the test ensures that a new commit was submitted after that +/// config change. +#[test] +#[ignore] +fn rbf_on_config_change() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + let _http_origin = format!("http://{}", &conf.node.rpc_bind); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + conf.node.next_initiative_delay = 500; + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::random(); + let signer_addr = tests::to_addr(&signer_sk); + let sender_sk = Secp256k1PrivateKey::random(); + let recipient_sk = Secp256k1PrivateKey::random(); + let _recipient_addr = tests::to_addr(&recipient_sk); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let old_burn_fee_cap: u64 = 100000; + conf.burnchain.burn_fee_cap = old_burn_fee_cap; + conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1000000); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + + test_observer::spawn(); + test_observer::register(&mut conf, &[EventKeyType::AnyEvent]); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let conf_path = + std::env::temp_dir().join(format!("miner-config-test-{}.toml", rand::random::())); + conf.config_path = Some(conf_path.clone().to_str().unwrap().to_string()); + + // Make a minimum-viable config file + let update_config = |burn_fee_cap: u64, sats_vbyte: u64| { + use std::io::Write; + + let new_config = format!( + r#" + [burnchain] + burn_fee_cap = {} + satoshis_per_byte = {} + "#, + burn_fee_cap, sats_vbyte, + ); + // Write to a file + let mut file = File::create(&conf_path).unwrap(); + file.write_all(new_config.as_bytes()).unwrap(); + }; + + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let counters = run_loop.counters(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + .. 
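+ // (other counters remain reachable through the `counters` handle bound above)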
+ } = run_loop.counters();
+
+ let coord_channel = run_loop.coordinator_channels();
+
+ let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
+ let mut signers = TestSigners::new(vec![signer_sk]);
+ wait_for_runloop(&blocks_processed);
+ boot_to_epoch_3(
+ &conf,
+ &blocks_processed,
+ &[stacker_sk],
+ &[signer_sk],
+ &mut Some(&mut signers),
+ &mut btc_regtest_controller,
+ );
+
+ info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+ blind_signer(&conf, &signers, &counters);
+
+ wait_for_first_naka_block_commit(60, &commits_submitted);
+
+ next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
+
+ let burnchain = conf.get_burnchain();
+ let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+ let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ let stacks_height = tip.stacks_block_height;
+
+ let mut last_log = Instant::now();
+ last_log -= Duration::from_secs(5);
+ wait_for(10, || {
+ let last_commit = &counters.naka_submitted_commit_last_stacks_tip.get();
+ if last_log.elapsed() >= Duration::from_secs(5) {
+ info!(
+ "---- last_commit: {:?} stacks_height: {:?} ---- ",
+ last_commit, stacks_height
+ );
+ last_log = Instant::now();
+ }
+ Ok(*last_commit >= stacks_height)
+ })
+ .expect("Failed to wait for last commit");
+
+ let commits_before = counters.naka_submitted_commits.get();
+
+ info!("---- Updating config ----");
+
+ update_config(155000, 57);
+
+ wait_for(12, || {
+ let commit_count = &counters.naka_submitted_commits.get();
+ Ok(*commit_count > commits_before)
+ })
+ .expect("Expected new commit after config change");
+
+ coord_channel
+ .lock()
+ .expect("Mutex poisoned")
+ .stop_chains_coordinator();
+ run_loop_stopper.store(false, Ordering::SeqCst);
+
+ run_loop_thread.join().unwrap();
+}
From fa9ffd233574f68e8a65212e2acf768f56e77f81 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Fri, 14 Mar 2025 12:59:21 +0100
Subject: [PATCH 119/238] added unit test for serialization/deserialization of StateMachineUpdate

---
 libsigner/src/v0/messages.rs | 60 ++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs
index 5aa1e98009c..18b205e63db 100644
--- a/libsigner/src/v0/messages.rs
+++ b/libsigner/src/v0/messages.rs
@@ -2045,4 +2045,64 @@ mod test {
 RejectReason::Unknown(RejectReasonPrefix::Unknown as u8)
 );
 }
+
+ #[test]
+ fn test_deserialize_state_machine_update() {
+ let signer_message = StateMachineUpdate {
+ burn_block: ConsensusHash::from_bytes(&[0x55; 20]).unwrap(),
+ burn_block_height: 100,
+ current_miner_pkh: Hash160::from_data(&[0xab; 32]),
+ parent_tenure_id: ConsensusHash::from_bytes(&[0x22; 20]).unwrap(),
+ parent_tenure_last_block: StacksBlockId([0x33u8; 32]),
+ parent_tenure_last_block_height: 1,
+ active_signer_protocol_version: 2,
+ local_supported_signer_protocol_version: 3,
+ };
+
+ let mut bytes = vec![];
+ signer_message.consensus_serialize(&mut bytes).unwrap();
+
+ let signer_message_deserialized =
+ StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap();
+
+ assert_eq!(
+ signer_message.burn_block,
+ signer_message_deserialized.burn_block
+ );
+
+ assert_eq!(
+ signer_message.burn_block_height,
+ signer_message_deserialized.burn_block_height
+ );
+
+ assert_eq!(
+ signer_message.current_miner_pkh,
+ signer_message_deserialized.current_miner_pkh
+ );
+
+ assert_eq!(
+ signer_message.parent_tenure_id,
+ signer_message_deserialized.parent_tenure_id
+ );
+ 
+ assert_eq!( + signer_message.parent_tenure_last_block, + signer_message_deserialized.parent_tenure_last_block + ); + + assert_eq!( + signer_message.parent_tenure_last_block_height, + signer_message_deserialized.parent_tenure_last_block_height + ); + + assert_eq!( + signer_message.active_signer_protocol_version, + signer_message_deserialized.active_signer_protocol_version + ); + + assert_eq!( + signer_message.local_supported_signer_protocol_version, + signer_message_deserialized.local_supported_signer_protocol_version + ); + } } From 9eda9c582799a5882ea7cd51074b803a1d628d9d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 13:16:59 +0100 Subject: [PATCH 120/238] added CostErrors::ExecutionTimeExpired --- clarity/src/vm/analysis/errors.rs | 1 + clarity/src/vm/ast/errors.rs | 4 ++++ clarity/src/vm/contexts.rs | 4 +--- clarity/src/vm/costs/mod.rs | 1 + clarity/src/vm/mod.rs | 3 ++- clarity/src/vm/tests/simple_apply_eval.rs | 5 +++-- 6 files changed, 12 insertions(+), 6 deletions(-) diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 8708e85f250..6cd03afc3cf 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -280,6 +280,7 @@ impl From for CheckErrors { CheckErrors::Expects("Unexpected interpreter failure in cost computation".into()) } CostErrors::Expect(s) => CheckErrors::Expects(s), + CostErrors::ExecutionTimeExpired => CheckErrors::ExecutionTimeExpired, } } } diff --git a/clarity/src/vm/ast/errors.rs b/clarity/src/vm/ast/errors.rs index 6c668bacc1d..278846fc505 100644 --- a/clarity/src/vm/ast/errors.rs +++ b/clarity/src/vm/ast/errors.rs @@ -92,6 +92,8 @@ pub enum ParseErrors { UnexpectedParserFailure, /// Should be an unreachable failure which invalidates the transaction InterpreterFailure, + + ExecutionTimeExpired, } #[derive(Debug, PartialEq)] @@ -173,6 +175,7 @@ impl From for ParseError { CostErrors::InterpreterFailure | CostErrors::Expect(_) => { ParseError::new(ParseErrors::InterpreterFailure) } + CostErrors::ExecutionTimeExpired => ParseError::new(ParseErrors::ExecutionTimeExpired), } } } @@ -299,6 +302,7 @@ impl DiagnosableError for ParseErrors { ParseErrors::NoteToMatchThis(token) => format!("to match this '{}'", token), ParseErrors::UnexpectedParserFailure => "unexpected failure while parsing".to_string(), ParseErrors::InterpreterFailure => "unexpected failure while parsing".to_string(), + ParseErrors::ExecutionTimeExpired => "max execution time expired".to_string(), } } diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 0bd314d0b24..3d83a920b7a 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; use std::fmt; use std::mem::replace; -use std::time::Instant; +use std::time::{Duration, Instant}; use hashbrown::{HashMap, HashSet}; use serde::Serialize; @@ -48,8 +48,6 @@ use crate::vm::types::{ use crate::vm::version::ClarityVersion; use crate::vm::{ast, eval, is_reserved, stx_transfer_consolidated}; -use std::time::Duration; - pub const MAX_CONTEXT_DEPTH: u16 = 256; // TODO: diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 1d8806690a6..0b1559795f6 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -414,6 +414,7 @@ pub enum CostErrors { CostContractLoadFailure, InterpreterFailure, Expect(String), + ExecutionTimeExpired, } impl CostErrors { diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 7bb0a81fe00..8ed2564c829 100644 --- 
a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -55,6 +55,7 @@ pub mod clarity; use std::collections::BTreeMap; +use costs::CostErrors; use serde_json; use stacks_common::types::StacksEpochId; @@ -325,7 +326,7 @@ pub fn eval( exp, env.global_context.execution_time_tracker.elapsed() ); - return Err(CheckErrors::ExecutionTimeExpired.into()); + return Err(CostErrors::ExecutionTimeExpired.into()); } if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 18d9e287614..c8049ec2ca2 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -41,7 +41,8 @@ use crate::vm::types::{ use crate::vm::{ eval, execute as vm_execute, execute_v2 as vm_execute_v2, execute_with_max_execution_time as vm_execute_with_max_execution_time, execute_with_parameters, - CallStack, ClarityVersion, ContractContext, Environment, GlobalContext, LocalContext, Value, + CallStack, ClarityVersion, ContractContext, CostErrors, Environment, GlobalContext, + LocalContext, Value, }; #[test] @@ -1773,6 +1774,6 @@ fn test_execution_time_expiration() { vm_execute_with_max_execution_time("(+ 1 1)", Duration::from_secs(0)) .err() .unwrap(), - CheckErrors::ExecutionTimeExpired.into() + CostErrors::ExecutionTimeExpired.into() ); } From 13782ebc84d6d9c9d1d3bfba2a38170a8d914352 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 13:40:26 +0100 Subject: [PATCH 121/238] added ExecutionTimeTracker --- clarity/src/vm/clarity.rs | 6 +++++- clarity/src/vm/contexts.rs | 24 ++++++++++++++++++------ clarity/src/vm/mod.rs | 30 ++++++++++++++++-------------- 3 files changed, 39 insertions(+), 21 deletions(-) diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index e3208287462..a2458484e8c 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -304,7 +304,11 @@ pub trait TransactionConnection: ClarityConnection { self.with_abort_callback( |vm_env| { - vm_env.context.set_max_execution_time(max_execution_time); + if let Some(max_execution_time_duration) = max_execution_time { + vm_env + .context + .set_max_execution_time(max_execution_time_duration); + } vm_env .execute_transaction( sender.clone(), diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 3d83a920b7a..3b98c4b828a 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -182,6 +182,17 @@ pub struct EventBatch { pub events: Vec, } +/** ExecutionTimeTracker keeps track of how much time a contract call is taking. + It is checked at every eval call. +*/ +pub enum ExecutionTimeTracker { + NoTracking, + MaxTime { + start_time: Instant, + max_duration: Duration, + }, +} + /** GlobalContext represents the outermost context for a single transaction's execution. 
It tracks an asset changes that occurred during the processing of the transaction, whether or not the current context is read_only, @@ -200,8 +211,7 @@ pub struct GlobalContext<'a, 'hooks> { /// This is the chain ID of the transaction pub chain_id: u32, pub eval_hooks: Option>, - pub execution_time_tracker: Instant, - pub max_execution_time: Option, + pub execution_time_tracker: ExecutionTimeTracker, } #[derive(Serialize, Deserialize, Clone)] @@ -1556,8 +1566,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { epoch_id, chain_id, eval_hooks: None, - execution_time_tracker: Instant::now(), - max_execution_time: None, + execution_time_tracker: ExecutionTimeTracker::NoTracking, } } @@ -1565,8 +1574,11 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { self.asset_maps.is_empty() } - pub fn set_max_execution_time(&mut self, max_execution_time: Option) { - self.max_execution_time = max_execution_time + pub fn set_max_execution_time(&mut self, max_execution_time: Duration) { + self.execution_time_tracker = ExecutionTimeTracker::MaxTime { + start_time: Instant::now(), + max_duration: max_execution_time, + } } fn get_asset_map(&mut self) -> Result<&mut AssetMap> { diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 8ed2564c829..a8948afc121 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -64,10 +64,10 @@ use self::ast::ContractAST; use self::costs::ExecutionCost; use self::diagnostic::Diagnostic; use crate::vm::callables::CallableType; -use crate::vm::contexts::GlobalContext; pub use crate::vm::contexts::{ CallStack, ContractContext, Environment, LocalContext, MAX_CONTEXT_DEPTH, }; +use crate::vm::contexts::{ExecutionTimeTracker, GlobalContext}; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{ runtime_cost, CostOverflowingMath, CostTracker, LimitedCostTracker, MemoryConsumer, @@ -304,11 +304,20 @@ pub fn apply( } } -fn check_max_execution_time_expired(global_context: &GlobalContext) -> bool { - if let Some(max_execution_time) = global_context.max_execution_time { - return global_context.execution_time_tracker.elapsed() >= max_execution_time; +fn check_max_execution_time_expired(global_context: &GlobalContext) -> Result<()> { + match global_context.execution_time_tracker { + ExecutionTimeTracker::NoTracking => Ok(()), + ExecutionTimeTracker::MaxTime { + start_time, + max_duration, + } => { + if start_time.elapsed() >= max_duration { + Err(CostErrors::ExecutionTimeExpired.into()) + } else { + Ok(()) + } + } } - false } pub fn eval( @@ -320,14 +329,7 @@ pub fn eval( Atom, AtomValue, Field, List, LiteralValue, TraitReference, }; - if check_max_execution_time_expired(env.global_context) { - warn!( - "ExecutionTime expired while running {:?} ({:?} elapsed)", - exp, - env.global_context.execution_time_tracker.elapsed() - ); - return Err(CostErrors::ExecutionTimeExpired.into()); - } + check_max_execution_time_expired(env.global_context)?; if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { for hook in eval_hooks.iter_mut() { @@ -616,7 +618,7 @@ pub fn execute_with_max_execution_time( ast::ASTRules::PrecheckSize, false, |g| { - g.set_max_execution_time(Some(max_execution_time)); + g.set_max_execution_time(max_execution_time); Ok(()) }, ) From 5d4475c1d0b600cd82c5285d6215388605e4c3e5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 14:06:50 +0100 Subject: [PATCH 122/238] fixed configuration system --- stackslib/src/config/mod.rs | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff 
--git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 8aa7309033e..f27940ad20c 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1104,7 +1104,13 @@ impl Config { }, miner_status, confirm_microblocks: false, - max_execution_time: miner_config.max_execution_time, + max_execution_time: if let Some(max_execution_time_secs) = + miner_config.max_execution_time + { + Some(Duration::from_secs(max_execution_time_secs)) + } else { + None + }, } } @@ -1147,7 +1153,13 @@ impl Config { }, miner_status, confirm_microblocks: true, - max_execution_time: miner_config.max_execution_time, + max_execution_time: if let Some(max_execution_time_secs) = + miner_config.max_execution_time + { + Some(Duration::from_secs(max_execution_time_secs)) + } else { + None + }, } } @@ -2179,8 +2191,8 @@ pub struct MinerConfig { pub tenure_extend_cost_threshold: u64, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections pub block_rejection_timeout_steps: HashMap, - - pub max_execution_time: Option, + /// Define max execution for contract calls + pub max_execution_time: Option, } impl Default for MinerConfig { @@ -2630,7 +2642,7 @@ pub struct MinerConfigFile { pub tenure_timeout_secs: Option, pub tenure_extend_cost_threshold: Option, pub block_rejection_timeout_steps: Option>, - pub max_execution_time: Option, + pub max_execution_time: Option, } impl MinerConfigFile { From f07179d0e858a7fb437355b0683fac7c022acbb2 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 14:08:08 +0100 Subject: [PATCH 123/238] updated CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 193ae1ebe24..d5a35d0acda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added" - Add fee information to transaction log ending with "success" or "skipped", while building a new block +- Add `max_execution_time` to miner config for limiting duration of contract calls ### Changed From f4f234b1f95cfd63dcf3806f921d13034660379a Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Fri, 14 Mar 2025 06:54:40 -0700 Subject: [PATCH 124/238] remove tests related to make_signed_microblock and make_mblock_tx_chain --- .../src/chainstate/stacks/db/transactions.rs | 430 ------------------ .../src/tests/neon_integrations.rs | 28 -- 2 files changed, 458 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index f92bde7d981..1185124dfca 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8112,436 +8112,6 @@ pub mod test { conn.commit_block(); } - fn make_signed_microblock( - block_privk: &StacksPrivateKey, - tx_privk: &StacksPrivateKey, - parent_block: BlockHeaderHash, - seq: u16, - ) -> StacksMicroblock { - // make transaction - let contract = r#" - (define-public (send-stx (amount uint) (recipient principal)) - (stx-transfer? 
amount tx-sender recipient)) - "#; - - let auth = TransactionAuth::from_p2pkh(tx_privk).unwrap(); - let addr = auth.origin().address_testnet(); - - let mut rng = rand::thread_rng(); - - let mut tx_contract_create = StacksTransaction::new( - TransactionVersion::Testnet, - auth, - TransactionPayload::new_smart_contract( - &format!("hello-world-{}", &rng.gen::()), - contract, - None, - ) - .unwrap(), - ); - - tx_contract_create.chain_id = 0x80000000; - tx_contract_create.set_tx_fee(0); - - let mut signer = StacksTransactionSigner::new(&tx_contract_create); - signer.sign_origin(tx_privk).unwrap(); - - let signed_contract_tx = signer.get_tx().unwrap(); - - // make block - let txs = vec![signed_contract_tx]; - let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); - let merkle_tree = MerkleTree::::new(&txid_vecs); - let tx_merkle_root = merkle_tree.root(); - - let mut mblock = StacksMicroblock { - header: StacksMicroblockHeader { - version: 0x12, - sequence: seq, - prev_block: parent_block, - tx_merkle_root, - signature: MessageSignature([0u8; 65]), - }, - txs, - }; - mblock.sign(block_privk).unwrap(); - mblock - } - - #[test] - fn process_poison_microblock_same_block() { - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); - let addr = auth.origin().address_testnet(); - - let balances = vec![(addr.clone(), 1000000000)]; - - let mut chainstate = - instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); - - let block_privk = StacksPrivateKey::from_hex( - "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", - ) - .unwrap(); - - let block_pubkh = - Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); - - let reporter_privk = StacksPrivateKey::from_hex( - "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", - ) - .unwrap(); - let reporter_addr = TransactionAuth::from_p2pkh(&reporter_privk) - .unwrap() - .origin() - .address_testnet(); - - for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { - let mut conn = chainstate.block_begin( - *burn_db, - &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_STACKS_BLOCK_HASH, - &ConsensusHash([(dbi + 1) as u8; 20]), - &BlockHeaderHash([(dbi + 1) as u8; 32]), - ); - - StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); - - let height_opt = - StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); - assert_eq!(height_opt.unwrap(), 1); - - // make poison - let mblock_1 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); - let mblock_2 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); - assert!(mblock_1 != mblock_2); - - // report poison (in the same block) - let mut tx_poison_microblock = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&reporter_privk).unwrap(), - TransactionPayload::PoisonMicroblock( - mblock_1.header.clone(), - mblock_2.header.clone(), - ), - ); - - tx_poison_microblock.chain_id = 0x80000000; - tx_poison_microblock.set_tx_fee(0); - - let mut signer = StacksTransactionSigner::new(&tx_poison_microblock); - signer.sign_origin(&reporter_privk).unwrap(); - let signed_tx_poison_microblock = signer.get_tx().unwrap(); - - // process it! 
- let (fee, receipt) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_poison_microblock, - false, - ASTRules::PrecheckSize, - ) - .unwrap(); - - // there must be a poison record for this microblock, from the reporter, for the microblock - // sequence. - let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap(); - assert_eq!(report_opt.unwrap(), (reporter_addr, 123)); - - // result must encode poison information - let result_data = receipt.result.expect_tuple().unwrap(); - - let height = result_data - .get("block_height") - .unwrap() - .to_owned() - .expect_u128() - .unwrap(); - let mblock_pubkh = result_data - .get("microblock_pubkey_hash") - .unwrap() - .to_owned() - .expect_buff(20) - .unwrap(); - let reporter = result_data - .get("reporter") - .unwrap() - .to_owned() - .expect_principal() - .unwrap(); - let seq = result_data - .get("sequence") - .unwrap() - .to_owned() - .expect_u128() - .unwrap(); - - assert_eq!(height, 1); - assert_eq!(mblock_pubkh, block_pubkh.0.to_vec()); - assert_eq!(seq, 123); - assert_eq!(reporter, reporter_addr.to_account_principal()); - - conn.commit_block(); - } - } - - #[test] - fn process_poison_microblock_invalid_transaction() { - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); - let addr = auth.origin().address_testnet(); - - let balances = vec![(addr.clone(), 1000000000)]; - - let mut chainstate = - instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); - - let block_privk = StacksPrivateKey::from_hex( - "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", - ) - .unwrap(); - - let block_pubkh = - Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); - - let reporter_privk = StacksPrivateKey::from_hex( - "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", - ) - .unwrap(); - let reporter_addr = TransactionAuth::from_p2pkh(&reporter_privk) - .unwrap() - .origin() - .address_testnet(); - - for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { - let mut conn = chainstate.block_begin( - *burn_db, - &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_STACKS_BLOCK_HASH, - &ConsensusHash([(dbi + 1) as u8; 20]), - &BlockHeaderHash([(dbi + 1) as u8; 32]), - ); - - StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); - - let height_opt = - StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); - assert_eq!(height_opt.unwrap(), 1); - - // make poison, but for an unknown microblock fork - let mblock_1 = make_signed_microblock(&privk, &privk, BlockHeaderHash([0x11; 32]), 123); - let mblock_2 = make_signed_microblock(&privk, &privk, BlockHeaderHash([0x11; 32]), 123); - assert!(mblock_1 != mblock_2); - - // report poison (in the same block) - let mut tx_poison_microblock = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&reporter_privk).unwrap(), - TransactionPayload::PoisonMicroblock( - mblock_1.header.clone(), - mblock_2.header.clone(), - ), - ); - - tx_poison_microblock.chain_id = 0x80000000; - tx_poison_microblock.set_tx_fee(0); - - let mut signer = StacksTransactionSigner::new(&tx_poison_microblock); - signer.sign_origin(&reporter_privk).unwrap(); - let signed_tx_poison_microblock = signer.get_tx().unwrap(); - - // should fail to process -- the transaction is invalid if it doesn't point to a known - // 
microblock pubkey hash. - let err = StacksChainState::process_transaction( - &mut conn, - &signed_tx_poison_microblock, - false, - ASTRules::PrecheckSize, - ) - .unwrap_err(); - let Error::ClarityError(clarity_error::BadTransaction(msg)) = &err else { - panic!("Unexpected error type"); - }; - assert!(msg.find("never seen in this fork").is_some()); - conn.commit_block(); - } - } - - #[test] - fn process_poison_microblock_multiple_same_block() { - let privk = StacksPrivateKey::from_hex( - "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", - ) - .unwrap(); - let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); - let addr = auth.origin().address_testnet(); - - let balances = vec![(addr.clone(), 1000000000)]; - - let mut chainstate = - instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); - - let block_privk = StacksPrivateKey::from_hex( - "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", - ) - .unwrap(); - - let block_pubkh = - Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); - - let reporter_privk_1 = StacksPrivateKey::from_hex( - "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", - ) - .unwrap(); - let reporter_privk_2 = StacksPrivateKey::from_hex( - "ca7ba28b9604418413a16d74e7dbe5c3e0012281183f590940bab0208c40faee01", - ) - .unwrap(); - let reporter_addr_1 = TransactionAuth::from_p2pkh(&reporter_privk_1) - .unwrap() - .origin() - .address_testnet(); - let reporter_addr_2 = TransactionAuth::from_p2pkh(&reporter_privk_2) - .unwrap() - .origin() - .address_testnet(); - - for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { - let mut conn = chainstate.block_begin( - *burn_db, - &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_STACKS_BLOCK_HASH, - &ConsensusHash([(dbi + 1) as u8; 20]), - &BlockHeaderHash([(dbi + 1) as u8; 32]), - ); - - StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); - - let height_opt = - StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); - assert_eq!(height_opt.unwrap(), 1); - - // make two sets of poisons - let mblock_1_1 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); - let mblock_1_2 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); - assert!(mblock_1_1 != mblock_1_2); - - // report poison (in the same block) - let mut tx_poison_microblock_1 = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&reporter_privk_1).unwrap(), - TransactionPayload::PoisonMicroblock( - mblock_1_1.header.clone(), - mblock_1_2.header.clone(), - ), - ); - - tx_poison_microblock_1.chain_id = 0x80000000; - tx_poison_microblock_1.set_tx_fee(0); - - let mut signer = StacksTransactionSigner::new(&tx_poison_microblock_1); - signer.sign_origin(&reporter_privk_1).unwrap(); - let signed_tx_poison_microblock_1 = signer.get_tx().unwrap(); - - // make two sets of poisons - let mblock_2_1 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x10; 32]), 122); - let mblock_2_2 = - make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x10; 32]), 122); - assert!(mblock_2_1 != mblock_2_2); - - // report poison (in the same block) - let mut tx_poison_microblock_2 = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&reporter_privk_2).unwrap(), - TransactionPayload::PoisonMicroblock( - mblock_2_1.header.clone(), - mblock_2_2.header.clone(), - ), - ); - - 
tx_poison_microblock_2.chain_id = 0x80000000; - tx_poison_microblock_2.set_tx_fee(0); - - let mut signer = StacksTransactionSigner::new(&tx_poison_microblock_2); - signer.sign_origin(&reporter_privk_2).unwrap(); - let signed_tx_poison_microblock_2 = signer.get_tx().unwrap(); - - // process it! - let (fee, receipt) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_poison_microblock_1, - false, - ASTRules::PrecheckSize, - ) - .unwrap(); - - // there must be a poison record for this microblock, from the reporter, for the microblock - // sequence. - let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap(); - assert_eq!(report_opt.unwrap(), (reporter_addr_1, 123)); - - // process the second one! - let (fee, receipt) = StacksChainState::process_transaction( - &mut conn, - &signed_tx_poison_microblock_2, - false, - ASTRules::PrecheckSize, - ) - .unwrap(); - - // there must be a poison record for this microblock, from the reporter, for the microblock - // sequence. Moreover, since the fork was earlier in the stream, the second reporter gets - // it. - let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap(); - assert_eq!(report_opt.unwrap(), (reporter_addr_2, 122)); - - // result must encode poison information - let result_data = receipt.result.expect_tuple().unwrap(); - - let height = result_data - .get("block_height") - .unwrap() - .to_owned() - .expect_u128() - .unwrap(); - let mblock_pubkh = result_data - .get("microblock_pubkey_hash") - .unwrap() - .to_owned() - .expect_buff(20) - .unwrap(); - let reporter = result_data - .get("reporter") - .unwrap() - .to_owned() - .expect_principal() - .unwrap(); - let seq = result_data - .get("sequence") - .unwrap() - .to_owned() - .expect_u128() - .unwrap(); - - assert_eq!(height, 1); - assert_eq!(mblock_pubkh, block_pubkh.0.to_vec()); - assert_eq!(seq, 122); - assert_eq!(reporter, reporter_addr_2.to_account_principal()); - - conn.commit_block(); - } - } - #[test] fn test_get_tx_clarity_version_v205() { struct MockedBurnDB {} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 3d912fbe5b8..b41bbb11599 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3316,34 +3316,6 @@ fn should_fix_2771() { channel.stop_chains_coordinator(); } -/// Returns a StacksMicroblock with the given transactions, sequence, and parent block that is -/// signed with the given private key. 
-fn make_signed_microblock( - block_privk: &StacksPrivateKey, - txs: Vec, - parent_block: BlockHeaderHash, - seq: u16, -) -> StacksMicroblock { - let mut rng = rand::thread_rng(); - - let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); - let merkle_tree = MerkleTree::::new(&txid_vecs); - let tx_merkle_root = merkle_tree.root(); - - let mut mblock = StacksMicroblock { - header: StacksMicroblockHeader { - version: rng.gen(), - sequence: seq, - prev_block: parent_block, - tx_merkle_root, - signature: MessageSignature([0u8; 65]), - }, - txs, - }; - mblock.sign(block_privk).unwrap(); - mblock -} - #[test] #[ignore] fn filter_low_fee_tx_integration_test() { From f5214906e90fdd76c1bdb9903c3667666162ce4f Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 14 Mar 2025 07:13:22 -0700 Subject: [PATCH 125/238] fix: use ci-friendly timeouts in test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0917342f2bd..3bfbf35e88d 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11340,7 +11340,7 @@ fn rbf_on_config_change() { let mut last_log = Instant::now(); last_log -= Duration::from_secs(5); - wait_for(10, || { + wait_for(30, || { let last_commit = &counters.naka_submitted_commit_last_stacks_tip.get(); if last_log.elapsed() >= Duration::from_secs(5) { info!( @@ -11359,7 +11359,7 @@ fn rbf_on_config_change() { update_config(155000, 57); - wait_for(12, || { + wait_for(30, || { let commit_count = &counters.naka_submitted_commits.get(); Ok(*commit_count > commits_before) }) From 14a4acd3a3a2af8f935fecd485a706e3a49f3a6c Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Fri, 14 Mar 2025 07:16:05 -0700 Subject: [PATCH 126/238] chore: changelog --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 193ae1ebe24..1855340626f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,11 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] -### Added" +### Added + - Add fee information to transaction log ending with "success" or "skipped", while building a new block +- When a miner's config file is updated (ie with a new fee rate), a new block commit is issued using + the new values ([#5924](https://github.com/stacks-network/stacks-core/pull/5924)) ### Changed From 9937c410544af53bea51ae7c2527ed15fecd9d4b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 17:42:54 +0100 Subject: [PATCH 127/238] regressions check for serialization --- libsigner/src/v0/messages.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 18b205e63db..7103ee98386 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -2049,11 +2049,11 @@ mod test { #[test] fn test_deserialize_state_machine_update() { let signer_message = StateMachineUpdate { - burn_block: ConsensusHash::from_bytes(&[0x55; 20]).unwrap(), + burn_block: ConsensusHash([0x55; 20]), burn_block_height: 100, - current_miner_pkh: Hash160::from_data(&[0xab; 32]), - parent_tenure_id: ConsensusHash::from_bytes(&[0x22; 20]).unwrap(), - parent_tenure_last_block: StacksBlockId([0x33u8; 32]), + current_miner_pkh: Hash160([0xab; 
20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), parent_tenure_last_block_height: 1, active_signer_protocol_version: 2, local_supported_signer_protocol_version: 3, @@ -2062,6 +2062,20 @@ mod test { let mut bytes = vec![]; signer_message.consensus_serialize(&mut bytes).unwrap(); + // check for raw content for avoiding regressions when structure changes + let raw_signer_message: Vec<&[u8]> = vec![ + /* burn_block*/ &[0x55; 20], + /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], + /* current_miner_pkh */ &[0xab; 20], + /* parent_tenure_id*/ &[0x22; 20], + /* parent_tenure_last_block */ &[0x33; 32], + /* parent_tenure_last_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 1], + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2], + /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3], + ]; + + assert_eq!(bytes, raw_signer_message.concat()); + let signer_message_deserialized = StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap(); From e28e22b6502716f0483794dde8ece798e27f94f0 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 14 Mar 2025 17:47:59 +0100 Subject: [PATCH 128/238] improved comment on max_execution_time --- stackslib/src/config/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f27940ad20c..2865fdacce6 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2191,7 +2191,7 @@ pub struct MinerConfig { pub tenure_extend_cost_threshold: u64, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections pub block_rejection_timeout_steps: HashMap, - /// Define max execution for contract calls + /// Define max execution time for contract calls: transactions taking more than the specified amount of seconds will be rejected pub max_execution_time: Option, } From 3274f084dc4669dd2f0b082f0bca02a5a26bbc3e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 14 Mar 2025 13:46:31 -0400 Subject: [PATCH 129/238] feat: further mempool iteration improvements and testing --- .github/workflows/bitcoin-tests.yml | 2 + stackslib/src/chainstate/stacks/miner.rs | 11 + stackslib/src/core/mempool.rs | 6 + .../src/tests/nakamoto_integrations.rs | 637 +++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 525 ++++++++++++++- 5 files changed, 1172 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 53c49448918..1e54a43e1a7 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -125,6 +125,8 @@ jobs: - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY # This test takes a long time to run, and is meant to be run manually - test-name: tests::nakamoto_integrations::large_mempool + - test-name: tests::nakamoto_integrations::large_mempool_random_fee + - test-name: tests::nakamoto_integrations::larger_mempool steps: ## Setup test environment diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f6515d2ffdf..0dac4f4c029 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2306,6 +2306,17 @@ impl StacksBlockBuilder { let mut loop_result = Ok(()); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; + + // Check if we've been preempted before we attempt mining. 
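+ // ("Preempted" means that some other part of the node, e.g. the relayer
+ // reacting to a new burnchain block, has set our MinerStatus to blocked.)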
+ // This is important because otherwise, we will add unnecessary + // contention on the mempool DB. + blocked = + (*settings.miner_status.lock().expect("FATAL: mutex poisoned")).is_blocked(); + if blocked { + info!("Miner stopping due to preemption"); + break; + } + let intermediate_result = mempool.iterate_candidates( epoch_tx, &mut tx_events, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index d3b83403050..68d00303922 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -844,6 +844,10 @@ const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[ DROP INDEX IF EXISTS "by_origin"; "#, r#" + -- Add indexes for nonce sorting + CREATE INDEX IF NOT EXISTS by_address_nonce ON nonces(address, nonce); + "#, + r#" INSERT INTO schema_version (version) VALUES (8) "#, ]; @@ -1559,6 +1563,8 @@ impl MemPoolDB { LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address WHERE (no.address IS NULL OR m.origin_nonce = no.nonce) AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce) + ORDER BY fee_rate DESC + LIMIT 1024 ), address_nonce_ranked AS ( SELECT *, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c0a7d1a822b..a3451b61b45 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; -use std::fs::File; +use std::fs::{self, File}; use std::io::Cursor; use std::ops::RangeBounds; use std::sync::atomic::{AtomicU64, Ordering}; @@ -33,6 +33,7 @@ use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0}; use libsigner::{SignerSession, StackerDBSession}; +use rand::{thread_rng, Rng}; use rusqlite::{params, Connection, OptionalExtension, Transaction}; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -11232,7 +11233,7 @@ fn reload_miner_config() { run_loop_thread.join().unwrap(); } -fn insert_tx_in_mempool( +pub fn insert_tx_in_mempool( db_tx: &Transaction, tx_hex: Vec, origin_addr: &StacksAddress, @@ -11254,10 +11255,13 @@ fn insert_tx_in_mempool( block_header_hash, height, accept_time, - tx) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)"; + tx, + fee_rate) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)"; + let origin_addr_str = origin_addr.to_string(); let length = tx_hex.len() as u64; + let fee_rate = fee / length * 30; let txid = { let mut cursor = Cursor::new(&tx_hex); @@ -11277,7 +11281,8 @@ fn insert_tx_in_mempool( block_header_hash, height, Utc::now().timestamp(), - tx_hex + tx_hex, + fee_rate ]; db_tx .execute(sql, args) @@ -11426,7 +11431,6 @@ fn large_mempool() { new_senders.push(recipient_sk); } } - db_tx.commit().unwrap(); info!("Sending first round of funding took {:?}", timer.elapsed()); @@ -11669,3 +11673,624 @@ fn large_mempool() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. 
It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool from those accounts with random fees between +/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause +/// block mining and check how long it takes for the miner to mine the first +/// block, and how long it takes to empty the mempool. +fn large_mempool_random_fee() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + + let sender_signer_sk = Secp256k1PrivateKey::random(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + let transfer_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + // Start with 10 accounts with initial balances. + let initial_sender_sks = (0..10) + .map(|_| StacksPrivateKey::random()) + .collect::>(); + let initial_sender_addrs = initial_sender_sks + .iter() + .map(|sk| tests::to_addr(sk)) + .collect::>(); + + // These 10 accounts will send to 25 accounts each, then those 260 accounts + // will send to 25 accounts each, for a total of 6760 accounts. + // At the end of the funding round, we want to have 6760 accounts with + // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends. + // With a fee of 180 - 2000 uSTX per send, we need each account to end up + // with 2001 * 25 = 50_025 uSTX. + // The 260 accounts in the middle will need to have + // (50025 + 180) * 26 = 1_305_330 uSTX. + // The 10 initial accounts will need to have + // (1305330 + 180) * 26 = 33_943_260 uSTX. + let initial_balance = 33_943_260; + for addr in initial_sender_addrs.iter() { + naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); + } + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + naka_conf.node.working_dir + ); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_proposed_blocks, + .. 
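+ // (the remaining counters are reached through the `counters` handle
+ // bound just below)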
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 1_305_330, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 50_025, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if 
account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the first round of transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + + info!( + "Sending first round of transfers took {:?}", + timer.elapsed() + ); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + + info!("Mining first round of transfers"); + + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(10, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!( + "Mining first block of first round of transfers took {:?}", + timer.elapsed() + ); + + // Wait for the first round of transfers to all be mined + wait_for(3600, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of transfers to be mined"); + + info!("Mining first round of transfers took {:?}", timer.elapsed()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool from those accounts with random fees between +/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause +/// block mining and check how long it takes for the miner to mine the first +/// block, and how long it takes to empty the mempool. 
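+///
+/// As a sketch of the fan-out described above: the 10 seed accounts fund
+/// 10 * 25 = 250 new accounts (260 total), and those 260 fund
+/// 260 * 25 = 6_500 more, for 6_760 sending accounts overall.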
+fn larger_mempool() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
+    naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+    naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
+
+    let sender_signer_sk = Secp256k1PrivateKey::random();
+    let sender_signer_addr = tests::to_addr(&sender_signer_sk);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
+    let stacker_sk = setup_stacker(&mut naka_conf);
+    let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times for each of 10 rounds of sends.
+    // With a fee of 180 - 2000 uSTX per send, we need each account to end up
+    // with 2001 * 25 * 10 = 500_250 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (500250 + 180) * 26 = 13_011_180 uSTX.
+    // The 10 initial accounts will need to have
+    // (13011180 + 180) * 26 = 338_295_360 uSTX.
+    let initial_balance = 338_295_360;
+    for addr in initial_sender_addrs.iter() {
+        naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance);
+    }
+    // This will hold tuples for all of our senders, with the sender pk and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    test_observer::spawn();
+    test_observer::register_any(&mut naka_conf);
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        naka_conf.node.working_dir
+    );
+
+    let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
+    btcd_controller
+        .start_bitcoind()
+        .expect("Failed starting bitcoind");
+    let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
+    btc_regtest_controller.bootstrap_chain(201);
+
+    let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap();
+    let run_loop_stopper = run_loop.get_termination_switch();
+    let Counters {
+        blocks_processed,
+        naka_proposed_blocks,
+        ..
+ } = run_loop.counters(); + let counters = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + blind_signer(&naka_conf, &signers, &counters); + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 13_011_180, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient_addr.into(), + 500_250, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if 
account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the first round of transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..10 { + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + naka_conf.burnchain.chain_id, + &recipient, + 1, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + } + + info!( + "Sending first round of transfers took {:?}", + timer.elapsed() + ); + + let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); + + info!("Mining first round of transfers"); + + let timer = Instant::now(); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(10, || { + let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); + Ok(blocks_proposed > blocks_proposed_before) + }) + .expect("Timed out waiting for first block to be mined"); + + info!( + "Mining first block of first round of transfers took {:?}", + timer.elapsed() + ); + + // Wait for the first round of transfers to all be mined + wait_for(7200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of transfers to be mined"); + + info!("Mining first round of transfers took {:?}", timer.elapsed()); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 600f33bf5d5..960f4e37ac2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -14,6 +14,7 @@ // along with this program. If not, see . 
use std::collections::{HashMap, HashSet}; +use std::io::Cursor; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -21,6 +22,7 @@ use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{env, thread}; +use chrono::Utc; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, PeerInfo, RejectCode, @@ -29,10 +31,12 @@ use libsigner::v0::messages::{ use libsigner::{ BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING, }; +use rusqlite::{params, Connection, Transaction}; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::LeaderBlockCommitOp; +use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; @@ -42,6 +46,7 @@ use stacks::chainstate::stacks::miner::{TransactionEvent, TransactionSuccessEven use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig}; +use stacks::core::mempool::MemPoolWalkStrategy; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -89,9 +94,9 @@ use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, - next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, - POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, insert_tx_in_mempool, next_block_and, + next_block_and_controller, next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, + wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, @@ -12305,3 +12310,517 @@ fn retry_proposal() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test intends to check the timing of the mempool iteration when there +/// are a large number of transactions in the mempool. It will boot to epoch 3, +/// fan out some STX transfers to a large number of accounts, wait for these to +/// all be mined, and then pause block mining, and submit a large number of +/// transactions to the mempool from those accounts, all with the same fee. It +/// will then unpause block mining and wait for the first block to be mined. +/// Since the default miner configuration specifies to spend 5 seconds mining a +/// block, we expect that this first block should be proposed within 10 seconds +/// and approved within 20 seconds. We also verify that the block contains at +/// least 5,000 transactions, since a lower count than that would indicate a +/// clear regression. 
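+///
+/// For scale: 6_760 funded senders each submit 25 transfers, so roughly
+/// 6_760 * 25 = 169_000 transfers sit in the mempool when mining resumes;
+/// the 5,000-transaction floor is a small fraction of that backlog.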
+fn large_mempool() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends.
+    // With a fee of 180 uSTX per send, we need each account to end up with
+    // 181 * 25 * 2 = 9_050 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (9050 + 180) * 26 = 239_980 uSTX.
+    // The 10 initial accounts will need to have
+    // (239980 + 180) * 26 = 6_244_160 uSTX.
+    let initial_balance = 6_244_160;
+    let initial_balances = initial_sender_addrs
+        .iter()
+        .map(|addr| (addr.clone(), initial_balance))
+        .collect::<Vec<_>>();
+
+    let num_signers = 5;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        initial_balances,
+        |_| {},
+        |conf| {
+            conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+            conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    // This will hold tuples for all of our senders, with the sender pk and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        signer_test.running_nodes.conf.node.working_dir
+    );
+    let chain_id = signer_test.running_nodes.conf.burnchain.chain_id;
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
+    // Open a sqlite DB at mempool_db_path so that we can quickly add
+    // transactions to the mempool.
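+    // Rather than submitting transactions over RPC, this test writes rows
+    // straight into the mempool sqlite DB through the shared
+    // `insert_tx_in_mempool` helper; batching every insert inside a single
+    // rusqlite transaction is what makes tens of thousands of submissions
+    // fast enough. The pattern, condensed from the code that follows:
+    //
+    //     let mut conn = Connection::open(&mempool_db_path).unwrap();
+    //     let db_tx = conn.transaction().unwrap(); // one batch per round
+    //     // insert_tx_in_mempool(&db_tx, ...) for each transfer
+    //     db_tx.commit().unwrap(); // single commit for the whole batch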
+ let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 239_980, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 9_050, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + 
db_tx.commit().unwrap();
+
+    // All transfers in this round share the same 180 uSTX fee, so the
+    // `NextNonceWithHighestFeeRate` walk gets no help from fee ordering here
+    // and must make progress by nonce order alone.
+
+    info!("Sending transfers took {:?}", timer.elapsed());
+
+    let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len();
+    let blocks_before = test_observer::get_blocks().len();
+
+    info!("Mining transfers...");
+
+    // Unpause block mining
+    TEST_MINE_STALL.set(false);
+
+    // Wait for the first block to be proposed.
+    wait_for(30, || {
+        let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len();
+        Ok(proposed_blocks > proposed_blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    let blocks = test_observer::get_mined_nakamoto_blocks();
+    let last_block = blocks.last().unwrap();
+    info!(
+        "First block contains {} transactions",
+        last_block.tx_events.len()
+    );
+    assert!(last_block.tx_events.len() > 5000);
+
+    // Wait for the first block to be accepted.
+    wait_for(20, || {
+        let blocks = test_observer::get_blocks().len();
+        Ok(blocks > blocks_before)
+    })
+    .expect("Timed out waiting for first block to be mined");
+
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// This test intends to check the timing of the mempool iteration when there
+/// are a large number of transactions in the mempool. It will boot to epoch 3,
+/// fan out some STX transfers to a large number of accounts, wait for these to
+/// all be mined, and then pause block mining, and submit a large number of
+/// transactions to the mempool from those accounts with random fees between
+/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause
+/// block mining and wait for the first block to be mined. Since the default
+/// miner configuration specifies to spend 5 seconds mining a block, we expect
+/// that this first block should be proposed within 10 seconds and approved
+/// within 20 seconds. We also verify that the block contains at least 5,000
+/// transactions, since a lower count than that would indicate a clear
+/// regression.
+fn large_mempool_random_fee() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times.
+    // With a fee of 180 - 2000 uSTX per send, we need each account to end up
+    // with 2001 * 25 * 10 = 500_250 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (500250 + 180) * 26 = 13_011_180 uSTX.
+    // The 10 initial accounts will need to have
+    // (13011180 + 180) * 26 = 338_295_360 uSTX.
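+    // The `* 26` factor treats each funder as paying (child_balance +
+    // transfer_fee) 26 times: once per child for its 25 children, plus one
+    // extra share to cover its own later sends. Together with the 10x margin
+    // baked into the 500_250 figure, every account ends up comfortably
+    // over-funded for its single round of 25 random-fee transfers.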
+    let initial_balance = 338_295_360;
+    let initial_balances = initial_sender_addrs
+        .iter()
+        .map(|addr| (addr.clone(), initial_balance))
+        .collect::<Vec<_>>();
+
+    let num_signers = 5;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        initial_balances,
+        |_| {},
+        |conf| {
+            conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
+        },
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    // This will hold tuples for all of our senders, with the sender pk and
+    // the nonce
+    let mut senders = initial_sender_sks
+        .iter()
+        .map(|sk| (sk, 0))
+        .collect::<Vec<_>>();
+
+    let mempool_db_path = format!(
+        "{}/nakamoto-neon/chainstate/mempool.sqlite",
+        signer_test.running_nodes.conf.node.working_dir
+    );
+    let chain_id = signer_test.running_nodes.conf.burnchain.chain_id;
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+
+    // Open a sqlite DB at mempool_db_path so that we can quickly add
+    // transactions to the mempool.
+    let mut conn = Connection::open(&mempool_db_path).unwrap();
+    let db_tx = conn.transaction().unwrap();
+
+    info!("Sending the first round of funding");
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let sender_addr = tests::to_addr(sender_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                13_011_180,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending first round of funding took {:?}", timer.elapsed());
+
+    // Wait for the first round of funding to be mined
+    wait_for(120, || {
+        for (sender_sk, nonce) in senders.iter() {
+            let sender_addr = tests::to_addr(sender_sk);
+            let account = get_account(&http_origin, &sender_addr);
+            if account.nonce < *nonce {
+                return Ok(false);
+            }
+        }
+        Ok(true)
+    })
+    .expect("Timed out waiting for first round of funding to be mined");
+
+    info!(
+        "Sending and mining first round of funding took {:?}",
+        timer.elapsed()
+    );
+
+    // Add the new senders to the list of senders
+    senders.extend(new_senders.iter().map(|sk| (sk, 0)));
+
+    info!("Sending the second round of funding");
+    let db_tx = conn.transaction().unwrap();
+    let timer = Instant::now();
+    let mut new_senders = vec![];
+    for (sender_sk, nonce) in senders.iter_mut() {
+        for _ in 0..25 {
+            let sender_addr = tests::to_addr(sender_sk);
+            let recipient_sk = StacksPrivateKey::random();
+            let recipient_addr = tests::to_addr(&recipient_sk);
+            let transfer_tx = make_stacks_transfer(
+                sender_sk,
+                *nonce,
+                transfer_fee,
+                chain_id,
+                &recipient_addr.into(),
+                500_250,
+            );
+            insert_tx_in_mempool(
+                &db_tx,
+                transfer_tx,
+                &sender_addr,
+                *nonce,
+                transfer_fee,
+                &tip.consensus_hash,
+                &tip.canonical_stacks_tip_hash,
+                tip.stacks_block_height,
+            );
+            *nonce += 1;
+            new_senders.push(recipient_sk);
+        }
+    }
+    db_tx.commit().unwrap();
+
+    info!("Sending second round of funding took {:?}", timer.elapsed());
+
+    // Wait for
the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + + // Fill the mempool with the first round of transfers + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + + info!("Sending transfers took {:?}", timer.elapsed()); + + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + let blocks_before = test_observer::get_blocks().len(); + + info!("Mining transfers..."); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); + info!( + "First block contains {} transactions", + last_block.tx_events.len() + ); + assert!(last_block.tx_events.len() > 5000); + + // Wait for the first block to be accepted. 
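+    // The 20s bound below matches the "approved within 20 seconds"
+    // expectation in the doc comment: with a 5s mining budget per block,
+    // a timeout here points at a regression in mempool iteration rather
+    // than normal mining latency.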
+ wait_for(20, || { + let blocks = test_observer::get_blocks().len(); + Ok(blocks > blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + signer_test.shutdown(); +} From 3709e96a22c273c5883c2d5b4244f48c2ed73083 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 14 Mar 2025 13:49:53 -0400 Subject: [PATCH 130/238] test: use random fees for `larger_mempool` test --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index a3451b61b45..aa0066f7b50 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -12221,10 +12221,11 @@ fn larger_mempool() { for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); let transfer_tx = make_stacks_transfer( sender_sk, *nonce, - transfer_fee, + fee, naka_conf.burnchain.chain_id, &recipient, 1, @@ -12234,7 +12235,7 @@ fn larger_mempool() { transfer_tx, &sender_addr, *nonce, - transfer_fee, + fee, &tip.consensus_hash, &tip.canonical_stacks_tip_hash, tip.stacks_block_height, From db4dc8cfb4c7bccb8570da368a638ef17449578c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 14 Mar 2025 17:01:11 -0400 Subject: [PATCH 131/238] refactor: move testing utilities to a common location This allows them to be used in tests in stackslib/ or testnet/. --- .../stacks/tests/block_construction.rs | 2 +- stackslib/src/core/mod.rs | 2 + stackslib/src/core/tests/mod.rs | 1 + stackslib/src/core/util.rs | 520 ++++++++++++++++++ stackslib/src/net/mod.rs | 10 - stackslib/src/net/tests/download/nakamoto.rs | 3 +- stackslib/src/net/tests/inv/nakamoto.rs | 3 +- stackslib/src/net/tests/mempool/mod.rs | 1 + testnet/stacks-node/src/tests/epoch_205.rs | 10 +- testnet/stacks-node/src/tests/epoch_21.rs | 3 +- testnet/stacks-node/src/tests/epoch_22.rs | 3 +- testnet/stacks-node/src/tests/epoch_23.rs | 2 + testnet/stacks-node/src/tests/epoch_24.rs | 2 +- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- testnet/stacks-node/src/tests/integrations.rs | 10 +- testnet/stacks-node/src/tests/mempool.rs | 9 +- testnet/stacks-node/src/tests/mod.rs | 459 +--------------- .../src/tests/nakamoto_integrations.rs | 86 +-- .../src/tests/neon_integrations.rs | 10 +- testnet/stacks-node/src/tests/signer/v0.rs | 24 +- 20 files changed, 586 insertions(+), 576 deletions(-) create mode 100644 stackslib/src/core/util.rs diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 0ba5665f9c1..caec1747d64 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5034,7 +5034,7 @@ fn paramaterized_mempool_walk_test( fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..7) .map(|_user_index| { - let privk = StacksPrivateKey::new(); + let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 10aaece8ccb..5d6720d2384 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -34,6 +34,8 @@ pub mod 
nonce_cache; #[cfg(test)] pub mod tests; +#[cfg(any(test, feature = "testing"))] +pub mod util; use std::cmp::Ordering; pub type StacksEpoch = GenericStacksEpoch; diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index ec5fcf0ec7c..35ef5edd298 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -64,6 +64,7 @@ use crate::core::mempool::{ db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; +use crate::core::util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; diff --git a/stackslib/src/core/util.rs b/stackslib/src/core/util.rs new file mode 100644 index 00000000000..d49a7a59229 --- /dev/null +++ b/stackslib/src/core/util.rs @@ -0,0 +1,520 @@ +use std::io::Cursor; + +use chrono::Utc; +use clarity::codec::StacksMessageCodec; +use clarity::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, +}; +use clarity::vm::tests::BurnStateDB; +use clarity::vm::types::PrincipalData; +use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; +use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; + +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; +use crate::chainstate::stacks::{ + CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksTransaction, + StacksTransactionSigner, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionContractCall, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionSpendingCondition, TransactionVersion, +}; +use crate::util_lib::strings::StacksString; + +#[allow(clippy::too_many_arguments)] +pub fn sign_sponsored_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + payer: &StacksPrivateKey, + sender_nonce: u64, + payer_nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + Some(payer), + sender_nonce, + Some(payer_nonce), + tx_fee, + chain_id, + anchor_mode, + version, + ) +} + +pub fn sign_standard_single_sig_tx( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode( + payload, + sender, + nonce, + tx_fee, + chain_id, + TransactionAnchorMode::OnChainOnly, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, +) -> StacksTransaction { + sign_standard_single_sig_tx_anchor_mode_version( + payload, + sender, + nonce, + tx_fee, + chain_id, + anchor_mode, + TransactionVersion::Testnet, + ) +} + +pub fn sign_standard_single_sig_tx_anchor_mode_version( + payload: TransactionPayload, + sender: &StacksPrivateKey, + nonce: u64, + tx_fee: u64, + chain_id: u32, + anchor_mode: TransactionAnchorMode, + version: TransactionVersion, +) -> StacksTransaction { + sign_tx_anchor_mode_version( + payload, + sender, + None, + nonce, + None, + tx_fee, + chain_id, + 
anchor_mode,
+        version,
+    )
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn sign_tx_anchor_mode_version(
+    payload: TransactionPayload,
+    sender: &StacksPrivateKey,
+    payer: Option<&StacksPrivateKey>,
+    sender_nonce: u64,
+    payer_nonce: Option<u64>,
+    tx_fee: u64,
+    chain_id: u32,
+    anchor_mode: TransactionAnchorMode,
+    version: TransactionVersion,
+) -> StacksTransaction {
+    let mut sender_spending_condition =
+        TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender))
+            .expect("Failed to create p2pkh spending condition from public key.");
+    sender_spending_condition.set_nonce(sender_nonce);
+
+    let auth = match (payer, payer_nonce) {
+        (Some(payer), Some(payer_nonce)) => {
+            let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh(
+                StacksPublicKey::from_private(payer),
+            )
+            .expect("Failed to create p2pkh spending condition from public key.");
+            payer_spending_condition.set_nonce(payer_nonce);
+            payer_spending_condition.set_tx_fee(tx_fee);
+            TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition)
+        }
+        _ => {
+            sender_spending_condition.set_tx_fee(tx_fee);
+            TransactionAuth::Standard(sender_spending_condition)
+        }
+    };
+    let mut unsigned_tx = StacksTransaction::new(version, auth, payload);
+    unsigned_tx.anchor_mode = anchor_mode;
+    unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow;
+    unsigned_tx.chain_id = chain_id;
+
+    let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx);
+    tx_signer.sign_origin(sender).unwrap();
+    if let (Some(payer), Some(_)) = (payer, payer_nonce) {
+        tx_signer.sign_sponsor(payer).unwrap();
+    }
+
+    tx_signer.get_tx().unwrap()
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn serialize_sign_tx_anchor_mode_version(
+    payload: TransactionPayload,
+    sender: &StacksPrivateKey,
+    payer: Option<&StacksPrivateKey>,
+    sender_nonce: u64,
+    payer_nonce: Option<u64>,
+    tx_fee: u64,
+    chain_id: u32,
+    anchor_mode: TransactionAnchorMode,
+    version: TransactionVersion,
+) -> Vec<u8> {
+    let tx = sign_tx_anchor_mode_version(
+        payload,
+        sender,
+        payer,
+        sender_nonce,
+        payer_nonce,
+        tx_fee,
+        chain_id,
+        anchor_mode,
+        version,
+    );
+
+    let mut buf = vec![];
+    tx.consensus_serialize(&mut buf).unwrap();
+    buf
+}
+
+pub fn make_contract_publish_versioned(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_name: &str,
+    contract_content: &str,
+    version: Option<ClarityVersion>,
+) -> Vec<u8> {
+    let name = ContractName::from(contract_name);
+    let code_body = StacksString::from_string(&contract_content.to_string()).unwrap();
+
+    let payload =
+        TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version);
+
+    let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id);
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_contract_publish(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_name: &str,
+    contract_content: &str,
+) -> Vec<u8> {
+    make_contract_publish_versioned(
+        sender,
+        nonce,
+        tx_fee,
+        chain_id,
+        contract_name,
+        contract_content,
+        None,
+    )
+}
+
+pub fn make_contract_publish_microblock_only_versioned(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_name: &str,
+    contract_content: &str,
+    version: Option<ClarityVersion>,
+) -> Vec<u8> {
+    let name = ContractName::from(contract_name);
+    let code_body = StacksString::from_string(&contract_content.to_string()).unwrap();
+
+    let payload =
+        TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version);
+
+    let tx = sign_standard_single_sig_tx_anchor_mode(
+        payload,
+        sender,
+        nonce,
+        tx_fee,
+        chain_id,
+        TransactionAnchorMode::OffChainOnly,
+    );
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_contract_publish_microblock_only(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_name: &str,
+    contract_content: &str,
+) -> Vec<u8> {
+    make_contract_publish_microblock_only_versioned(
+        sender,
+        nonce,
+        tx_fee,
+        chain_id,
+        contract_name,
+        contract_content,
+        None,
+    )
+}
+
+pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress {
+    StacksAddress::from_public_keys(
+        C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
+        &AddressHashMode::SerializeP2PKH,
+        1,
+        &vec![StacksPublicKey::from_private(sk)],
+    )
+    .unwrap()
+}
+
+pub fn make_stacks_transfer(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    recipient: &PrincipalData,
+    amount: u64,
+) -> Vec<u8> {
+    let payload =
+        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
+    let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id);
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn make_sponsored_stacks_transfer_on_testnet(
+    sender: &StacksPrivateKey,
+    payer: &StacksPrivateKey,
+    sender_nonce: u64,
+    payer_nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    recipient: &PrincipalData,
+    amount: u64,
+) -> Vec<u8> {
+    let payload =
+        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
+    let tx = sign_sponsored_sig_tx_anchor_mode_version(
+        payload,
+        sender,
+        payer,
+        sender_nonce,
+        payer_nonce,
+        tx_fee,
+        chain_id,
+        TransactionAnchorMode::OnChainOnly,
+        TransactionVersion::Testnet,
+    );
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_stacks_transfer_mblock_only(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    recipient: &PrincipalData,
+    amount: u64,
+) -> Vec<u8> {
+    let payload =
+        TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
+    let tx = sign_standard_single_sig_tx_anchor_mode(
+        payload,
+        sender,
+        nonce,
+        tx_fee,
+        chain_id,
+        TransactionAnchorMode::OffChainOnly,
+    );
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_poison(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    header_1: StacksMicroblockHeader,
+    header_2: StacksMicroblockHeader,
+) -> Vec<u8> {
+    let payload = TransactionPayload::PoisonMicroblock(header_1, header_2);
+    let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id);
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> {
+    let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None);
+    let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id);
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn make_contract_call(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_addr: &StacksAddress,
+    contract_name: &str,
+    function_name: &str,
+    function_args: &[Value],
+) -> Vec<u8> {
+    let contract_name = ContractName::from(contract_name);
+    let function_name = ClarityName::from(function_name);
+
+    let payload = TransactionContractCall {
+        address: *contract_addr,
+        contract_name,
+        function_name,
+        function_args: function_args.to_vec(),
+    };
+
+    let tx = sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id);
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn make_contract_call_mblock_only(
+    sender: &StacksPrivateKey,
+    nonce: u64,
+    tx_fee: u64,
+    chain_id: u32,
+    contract_addr: &StacksAddress,
+    contract_name: &str,
+    function_name: &str,
+    function_args: &[Value],
+) -> Vec<u8> {
+    let contract_name = ContractName::from(contract_name);
+    let function_name = ClarityName::from(function_name);
+
+    let payload = TransactionContractCall {
+        address: *contract_addr,
+        contract_name,
+        function_name,
+        function_args: function_args.to_vec(),
+    };
+
+    let tx = sign_standard_single_sig_tx_anchor_mode(
+        payload.into(),
+        sender,
+        nonce,
+        tx_fee,
+        chain_id,
+        TransactionAnchorMode::OffChainOnly,
+    );
+    let mut tx_bytes = vec![];
+    tx.consensus_serialize(&mut tx_bytes).unwrap();
+    tx_bytes
+}
+
+pub fn make_microblock(
+    privk: &StacksPrivateKey,
+    chainstate: &mut StacksChainState,
+    burn_dbconn: &dyn BurnStateDB,
+    consensus_hash: ConsensusHash,
+    block: StacksBlock,
+    txs: Vec<StacksTransaction>,
+) -> StacksMicroblock {
+    let mut block_bytes = vec![];
+    block.consensus_serialize(&mut block_bytes).unwrap();
+
+    let mut microblock_builder = StacksMicroblockBuilder::new(
+        block.block_hash(),
+        consensus_hash,
+        chainstate,
+        burn_dbconn,
+        BlockBuilderSettings::max_value(),
+    )
+    .unwrap();
+    let mempool_txs: Vec<_> = txs
+        .into_iter()
+        .map(|tx| {
+            // TODO: better fee estimation
+            let mut tx_bytes = vec![];
+            tx.consensus_serialize(&mut tx_bytes).unwrap();
+            (tx, tx_bytes.len() as u64)
+        })
+        .collect();
+
+    // NOTE: we intentionally do not check the block's microblock pubkey hash against the private
+    // key, because we may need to test that microblocks get rejected due to bad signatures.
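+    // (Signature validity is only enforced when a node processes the
+    // microblock stream, so building with a mismatched key here still
+    // produces a well-formed microblock for negative tests.)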
+    microblock_builder
+        .mine_next_microblock_from_txs(mempool_txs, privk)
+        .unwrap()
+}
+
+pub fn insert_tx_in_mempool(
+    db_tx: &rusqlite::Transaction,
+    tx_hex: Vec<u8>,
+    origin_addr: &StacksAddress,
+    origin_nonce: u64,
+    fee: u64,
+    consensus_hash: &ConsensusHash,
+    block_header_hash: &BlockHeaderHash,
+    height: u64,
+) {
+    let sql = "INSERT OR REPLACE INTO mempool (
+        txid,
+        origin_address,
+        origin_nonce,
+        sponsor_address,
+        sponsor_nonce,
+        tx_fee,
+        length,
+        consensus_hash,
+        block_header_hash,
+        height,
+        accept_time,
+        tx,
+        fee_rate)
+        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)";
+
+    let origin_addr_str = origin_addr.to_string();
+    let length = tx_hex.len() as u64;
+    // Integer fee-per-byte scaled by 30: a rough stand-in for a real
+    // fee-rate estimate, good enough for ordering in tests.
+    let fee_rate = fee / length * 30;
+
+    let txid = {
+        let mut cursor = Cursor::new(&tx_hex);
+        StacksTransaction::consensus_deserialize(&mut cursor)
+            .expect("Failed to deserialize transaction")
+            .txid()
+    };
+    let args = rusqlite::params![
+        txid,
+        origin_addr_str,
+        origin_nonce,
+        origin_addr_str,
+        origin_nonce,
+        fee,
+        length,
+        consensus_hash,
+        block_header_hash,
+        height,
+        Utc::now().timestamp(),
+        tx_hex,
+        fee_rate
+    ];
+    db_tx
+        .execute(sql, args)
+        .expect("Failed to insert transaction into mempool");
+}
diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs
index 616ea8f81fb..73395727202 100644
--- a/stackslib/src/net/mod.rs
+++ b/stackslib/src/net/mod.rs
@@ -4929,14 +4929,4 @@ pub mod test {
             acct
         }
     }
-
-    pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress {
-        StacksAddress::from_public_keys(
-            C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
-            &AddressHashMode::SerializeP2PKH,
-            1,
-            &vec![StacksPublicKey::from_private(sk)],
-        )
-        .unwrap()
-    }
 }
diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs
index cc53f22a4f1..741bc418853 100644
--- a/stackslib/src/net/tests/download/nakamoto.rs
+++ b/stackslib/src/net/tests/download/nakamoto.rs
@@ -43,10 +43,11 @@ use crate::chainstate::stacks::{
     TransactionPayload, TransactionVersion,
 };
 use crate::clarity::vm::types::StacksAddressExtensions;
+use crate::core::util::to_addr;
 use crate::net::api::gettenureinfo::RPCGetTenureInfo;
 use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *};
 use crate::net::inv::nakamoto::NakamotoTenureInv;
-use crate::net::test::{dns_thread_start, to_addr, TestEventObserver};
+use crate::net::test::{dns_thread_start, TestEventObserver};
 use crate::net::tests::inv::nakamoto::{
     make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs,
 };
diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs
index 625cb7cd017..c8248ef4524 100644
--- a/stackslib/src/net/tests/inv/nakamoto.rs
+++ b/stackslib/src/net/tests/inv/nakamoto.rs
@@ -43,10 +43,11 @@ use crate::chainstate::stacks::{
     TransactionAuth, TransactionPayload, TransactionVersion,
 };
 use crate::clarity::vm::types::StacksAddressExtensions;
+use crate::core::util::to_addr;
 use crate::core::StacksEpochExtension;
 use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv};
 use crate::net::neighbors::comms::NeighborComms;
-use crate::net::test::{to_addr, TestEventObserver, TestPeer};
+use crate::net::test::{TestEventObserver, TestPeer};
 use crate::net::tests::{NakamotoBootPlan, NakamotoBootStep, NakamotoBootTenure};
 use crate::net::{
     Error as NetError, GetNakamotoInvData, HandshakeData, NakamotoInvData, NeighborAddress,
diff --git a/stackslib/src/net/tests/mempool/mod.rs
b/stackslib/src/net/tests/mempool/mod.rs index 9576ae7e546..81dc0cd43c7 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -32,6 +32,7 @@ use crate::burnchains::*; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::stacks::test::*; use crate::chainstate::stacks::*; +use crate::core::util::to_addr; use crate::core::StacksEpochExtension; use crate::net::atlas::*; use crate::net::codec::*; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 7462acd9637..0dbb548461c 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -14,6 +14,10 @@ use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; use stacks::config::{EventKeyType, InitialBalance}; +use stacks::core::util::{ + make_contract_call, make_contract_call_mblock_only, make_contract_publish, + make_contract_publish_microblock_only, to_addr, +}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -25,11 +29,7 @@ use stacks_common::util::sleep_ms; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; -use crate::tests::{ - make_contract_call, make_contract_call_mblock_only, make_contract_publish, - make_contract_publish_microblock_only, run_until_burnchain_height, select_transactions_where, - to_addr, -}; +use crate::tests::{run_until_burnchain_height, select_transactions_where}; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; #[test] diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index b287d2dec4b..3d85c0e9099 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -3,7 +3,7 @@ use std::{env, thread}; use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::bitcoin::address::{ BitcoinAddress, LegacyBitcoinAddressType, SegwitBitcoinAddress, }; @@ -25,6 +25,7 @@ use stacks::chainstate::stacks::miner::{ use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{Config, InitialBalance}; +use stacks::core::util::make_contract_call; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 3b3f8c19088..f1ef3c4dc45 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -2,13 +2,14 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::PrincipalData; -use clarity::vm::ClarityVersion; +use clarity::vm::{ClarityVersion, Value}; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; +use stacks::core::util::{make_contract_call, make_stacks_transfer}; use 
stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 057669547a3..1c6c19e970d 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -17,8 +17,10 @@ use std::collections::HashMap; use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use clarity::vm::Value; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; +use stacks::core::util::make_contract_call; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index ffe95720453..fc1fc1a64e3 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -27,6 +27,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::InitialBalance; +use stacks::core::util::{make_contract_call, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; @@ -42,7 +43,6 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_pox_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{make_contract_call, to_addr}; use crate::{neon, BitcoinRegtestController, BurnchainController}; #[cfg(test)] diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 3864d9c3507..1a1ef463f18 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -18,6 +18,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; +use stacks::core::util::{make_stacks_transfer_mblock_only, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; @@ -28,7 +29,6 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{make_stacks_transfer_mblock_only, to_addr}; use crate::{neon, BitcoinRegtestController, BurnchainController}; #[test] diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index a67d8ae2c89..dbfd48307eb 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -26,6 +26,10 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; +use stacks::core::util::{ + make_contract_call, make_contract_publish, make_sponsored_stacks_transfer_on_testnet, + make_stacks_transfer, to_addr, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, @@ -37,12 +41,8 @@ use 
stacks::net::api::getistraitimplemented::GetIsTraitImplementedResponse; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, VRFSeed}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum}; -use super::{ - make_contract_call, make_contract_publish, make_stacks_transfer, to_addr, ADDR_4, SK_1, SK_2, - SK_3, -}; +use super::{ADDR_4, SK_1, SK_2, SK_3}; use crate::helium::RunLoop; -use crate::tests::make_sponsored_stacks_transfer_on_testnet; const OTHER_CONTRACT: &str = " (define-data-var x uint u0) diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 5b8a07b56db..b60a0041162 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -15,6 +15,10 @@ use stacks::chainstate::stacks::{ }; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; +use stacks::core::util::{ + make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, + sign_standard_single_sig_tx_anchor_mode_version, to_addr, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; @@ -24,10 +28,7 @@ use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; use stacks_common::util::hash::*; use stacks_common::util::secp256k1::*; -use super::{ - make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, - sign_standard_single_sig_tx_anchor_mode_version, to_addr, SK_1, SK_2, -}; +use super::{SK_1, SK_2}; use crate::helium::RunLoop; use crate::Keychain; diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index c4230ce61f9..702c7244daa 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -18,29 +18,21 @@ use std::sync::atomic::AtomicU64; use std::sync::{Arc, Mutex}; use clarity::vm::costs::ExecutionCost; -use clarity::vm::database::BurnStateDB; use clarity::vm::events::STXEventType; -use clarity::vm::types::PrincipalData; -use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use lazy_static::lazy_static; use neon_integrations::test_observer::EVENT_OBSERVER_PORT; use rand::Rng; use stacks::chainstate::burn::ConsensusHash; -use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StacksTransactionEvent; -use stacks::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; use stacks::chainstate::stacks::{ - CoinbasePayload, StacksBlock, StacksMicroblock, StacksMicroblockHeader, StacksPrivateKey, - StacksPublicKey, StacksTransaction, StacksTransactionSigner, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionSpendingCondition, - TransactionVersion, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + StacksPrivateKey, StacksPublicKey, StacksTransaction, TransactionPayload, }; +#[cfg(any(test, feature = "testing"))] +use stacks::core::util::{make_contract_publish, to_addr}; use stacks::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_TESTNET}; -use stacks::util_lib::strings::StacksString; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress}; +use stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::util::get_epoch_time_secs; use 
stacks_common::util::hash::{hex_bytes, to_hex}; @@ -133,251 +125,6 @@ pub fn insert_new_port(port: u16) -> bool { ports.insert(port) } -#[allow(clippy::too_many_arguments)] -pub fn sign_sponsored_sig_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - payer: &StacksPrivateKey, - sender_nonce: u64, - payer_nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> StacksTransaction { - sign_tx_anchor_mode_version( - payload, - sender, - Some(payer), - sender_nonce, - Some(payer_nonce), - tx_fee, - chain_id, - anchor_mode, - version, - ) -} - -pub fn sign_standard_single_sig_tx( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, -) -> StacksTransaction { - sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OnChainOnly, - ) -} - -pub fn sign_standard_single_sig_tx_anchor_mode( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, -) -> StacksTransaction { - sign_standard_single_sig_tx_anchor_mode_version( - payload, - sender, - nonce, - tx_fee, - chain_id, - anchor_mode, - TransactionVersion::Testnet, - ) -} - -pub fn sign_standard_single_sig_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> StacksTransaction { - sign_tx_anchor_mode_version( - payload, - sender, - None, - nonce, - None, - tx_fee, - chain_id, - anchor_mode, - version, - ) -} - -#[allow(clippy::too_many_arguments)] -pub fn sign_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - payer: Option<&StacksPrivateKey>, - sender_nonce: u64, - payer_nonce: Option, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> StacksTransaction { - let mut sender_spending_condition = - TransactionSpendingCondition::new_singlesig_p2pkh(StacksPublicKey::from_private(sender)) - .expect("Failed to create p2pkh spending condition from public key."); - sender_spending_condition.set_nonce(sender_nonce); - - let auth = match (payer, payer_nonce) { - (Some(payer), Some(payer_nonce)) => { - let mut payer_spending_condition = TransactionSpendingCondition::new_singlesig_p2pkh( - StacksPublicKey::from_private(payer), - ) - .expect("Failed to create p2pkh spending condition from public key."); - payer_spending_condition.set_nonce(payer_nonce); - payer_spending_condition.set_tx_fee(tx_fee); - TransactionAuth::Sponsored(sender_spending_condition, payer_spending_condition) - } - _ => { - sender_spending_condition.set_tx_fee(tx_fee); - TransactionAuth::Standard(sender_spending_condition) - } - }; - let mut unsigned_tx = StacksTransaction::new(version, auth, payload); - unsigned_tx.anchor_mode = anchor_mode; - unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; - unsigned_tx.chain_id = chain_id; - - let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); - tx_signer.sign_origin(sender).unwrap(); - if let (Some(payer), Some(_)) = (payer, payer_nonce) { - tx_signer.sign_sponsor(payer).unwrap(); - } - - tx_signer.get_tx().unwrap() -} - -#[allow(clippy::too_many_arguments)] -pub fn serialize_sign_tx_anchor_mode_version( - payload: TransactionPayload, - sender: &StacksPrivateKey, - payer: 
Option<&StacksPrivateKey>, - sender_nonce: u64, - payer_nonce: Option, - tx_fee: u64, - chain_id: u32, - anchor_mode: TransactionAnchorMode, - version: TransactionVersion, -) -> Vec { - let tx = sign_tx_anchor_mode_version( - payload, - sender, - payer, - sender_nonce, - payer_nonce, - tx_fee, - chain_id, - anchor_mode, - version, - ); - - let mut buf = vec![]; - tx.consensus_serialize(&mut buf).unwrap(); - buf -} - -pub fn make_contract_publish_versioned( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, - version: Option, -) -> Vec { - let name = ContractName::from(contract_name); - let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - - let payload = - TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - - let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -pub fn make_contract_publish( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, -) -> Vec { - make_contract_publish_versioned( - sender, - nonce, - tx_fee, - chain_id, - contract_name, - contract_content, - None, - ) -} - -pub fn make_contract_publish_microblock_only_versioned( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, - version: Option, -) -> Vec { - let name = ContractName::from(contract_name); - let code_body = StacksString::from_string(&contract_content.to_string()).unwrap(); - - let payload = - TransactionPayload::SmartContract(TransactionSmartContract { name, code_body }, version); - - let tx = sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -pub fn make_contract_publish_microblock_only( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_name: &str, - contract_content: &str, -) -> Vec { - make_contract_publish_microblock_only_versioned( - sender, - nonce, - tx_fee, - chain_id, - contract_name, - contract_content, - None, - ) -} - pub fn new_test_conf() -> Config { // secretKey: "b1cf9cee5083f421c84d7cb53be5edf2801c3c78d63d53917aee0bdc8bd160ee01", // publicKey: "03e2ed46873d0db820e8c6001aabc082d72b5b900b53b7a1b9714fe7bde3037b81", @@ -438,204 +185,6 @@ pub fn set_random_binds(config: &mut Config) { config.node.p2p_address = format!("{localhost}:{p2p_port}"); } -pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { - StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(sk)], - ) - .unwrap() -} - -pub fn make_stacks_transfer( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -#[allow(clippy::too_many_arguments)] -pub fn make_sponsored_stacks_transfer_on_testnet( - sender: &StacksPrivateKey, - payer: &StacksPrivateKey, - sender_nonce: u64, - 
payer_nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - let tx = sign_sponsored_sig_tx_anchor_mode_version( - payload, - sender, - payer, - sender_nonce, - payer_nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OnChainOnly, - TransactionVersion::Testnet, - ); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -pub fn make_stacks_transfer_mblock_only( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - recipient: &PrincipalData, - amount: u64, -) -> Vec { - let payload = - TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - let tx = sign_standard_single_sig_tx_anchor_mode( - payload, - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -pub fn make_poison( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - header_1: StacksMicroblockHeader, - header_2: StacksMicroblockHeader, -) -> Vec { - let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - let tx = sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -#[allow(clippy::too_many_arguments)] -pub fn make_contract_call( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_addr: &StacksAddress, - contract_name: &str, - function_name: &str, - function_args: &[Value], -) -> Vec { - let contract_name = ContractName::from(contract_name); - let function_name = ClarityName::from(function_name); - - let payload = TransactionContractCall { - address: *contract_addr, - contract_name, - function_name, - function_args: function_args.to_vec(), - }; - - let tx = sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -#[allow(clippy::too_many_arguments)] -pub fn make_contract_call_mblock_only( - sender: &StacksPrivateKey, - nonce: u64, - tx_fee: u64, - chain_id: u32, - contract_addr: &StacksAddress, - contract_name: &str, - function_name: &str, - function_args: &[Value], -) -> Vec { - let contract_name = ContractName::from(contract_name); - let function_name = ClarityName::from(function_name); - - let payload = TransactionContractCall { - address: *contract_addr, - contract_name, - function_name, - function_args: function_args.to_vec(), - }; - - let tx = sign_standard_single_sig_tx_anchor_mode( - payload.into(), - sender, - nonce, - tx_fee, - chain_id, - TransactionAnchorMode::OffChainOnly, - ); - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - tx_bytes -} - -fn make_microblock( - privk: &StacksPrivateKey, - chainstate: &mut StacksChainState, - burn_dbconn: &dyn BurnStateDB, - consensus_hash: ConsensusHash, - block: StacksBlock, - txs: Vec, -) -> StacksMicroblock { - let mut 
block_bytes = vec![]; - block.consensus_serialize(&mut block_bytes).unwrap(); - - let mut microblock_builder = StacksMicroblockBuilder::new( - block.block_hash(), - consensus_hash, - chainstate, - burn_dbconn, - BlockBuilderSettings::max_value(), - ) - .unwrap(); - let mempool_txs: Vec<_> = txs - .into_iter() - .map(|tx| { - // TODO: better fee estimation - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes).unwrap(); - (tx, tx_bytes.len() as u64) - }) - .collect(); - - // NOTE: we intentionally do not check the block's microblock pubkey hash against the private - // key, because we may need to test that microblocks get rejected due to bad signatures. - microblock_builder - .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap() -} - /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that /// match `test_fn`. pub fn select_transactions_where( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index aa0066f7b50..a362f06fbfb 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs::{self, File}; -use std::io::Cursor; use std::ops::RangeBounds; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; @@ -24,7 +23,6 @@ use std::thread::JoinHandle; use std::time::{Duration, Instant}; use std::{env, thread}; -use chrono::Utc; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; @@ -34,7 +32,7 @@ use lazy_static::lazy_static; use libsigner::v0::messages::{RejectReason, SignerMessage as SignerMessageV0}; use libsigner::{SignerSession, StackerDBSession}; use rand::{thread_rng, Rng}; -use rusqlite::{params, Connection, OptionalExtension, Transaction}; +use rusqlite::{Connection, OptionalExtension}; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ @@ -65,6 +63,9 @@ use stacks::chainstate::stacks::{ }; use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; +use stacks::core::util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, make_stacks_transfer, +}; use stacks::core::{ EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, @@ -116,10 +117,7 @@ use crate::tests::neon_integrations::{ run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, wait_for_runloop, }; use crate::tests::signer::SignerTest; -use crate::tests::{ - gen_random_port, get_chain_info, make_contract_call, make_contract_publish, - make_contract_publish_versioned, make_stacks_transfer, to_addr, -}; +use crate::tests::{gen_random_port, get_chain_info, make_contract_publish, to_addr}; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000; @@ -890,7 +888,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -953,7 +951,7 @@ pub 
fn boot_to_epoch_3( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1052,7 +1050,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -1115,7 +1113,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_index = get_signer_index(&signer_set, &Secp256k1PublicKey::from_private(signer_sk)) .unwrap(); - let voting_tx = tests::make_contract_call( + let voting_tx = make_contract_call( signer_sk, 0, 300, @@ -1290,7 +1288,7 @@ pub fn setup_epoch_3_reward_set( .to_rsv(); let signer_pk = StacksPublicKey::from_private(signer_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -2734,7 +2732,7 @@ fn correct_burn_outs() { .unwrap() .to_rsv(); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( account.0, account.2.nonce, 1000, @@ -4678,7 +4676,7 @@ fn burn_ops_integration_test() { let signer_key_arg_1: StacksPublicKeyBuffer = signer_pk_1.to_bytes_compressed().as_slice().into(); - let set_signer_key_auth_tx = tests::make_contract_call( + let set_signer_key_auth_tx = make_contract_call( &signer_sk_1, 1, 500, @@ -6283,7 +6281,7 @@ fn clarity_burn_state() { // Pause mining to prevent the stacks block from being mined before the tenure change is processed TEST_MINE_STALL.set(true); // Submit a tx for the next block (the next block will be a new tenure, so the burn block height will increment) - let call_tx = tests::make_contract_call( + let call_tx = make_contract_call( &sender_sk, sender_nonce, tx_fee, @@ -6372,7 +6370,7 @@ fn clarity_burn_state() { result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block - let call_tx = tests::make_contract_call( + let call_tx = make_contract_call( &sender_sk, sender_nonce, tx_fee, @@ -11233,62 +11231,6 @@ fn reload_miner_config() { run_loop_thread.join().unwrap(); } -pub fn insert_tx_in_mempool( - db_tx: &Transaction, - tx_hex: Vec, - origin_addr: &StacksAddress, - origin_nonce: u64, - fee: u64, - consensus_hash: &ConsensusHash, - block_header_hash: &BlockHeaderHash, - height: u64, -) { - let sql = "INSERT OR REPLACE INTO mempool ( - txid, - origin_address, - origin_nonce, - sponsor_address, - sponsor_nonce, - tx_fee, - length, - consensus_hash, - block_header_hash, - height, - accept_time, - tx, - fee_rate) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)"; - - let origin_addr_str = origin_addr.to_string(); - let length = tx_hex.len() as u64; - let fee_rate = fee / length * 30; - - let txid = { - let mut cursor = Cursor::new(&tx_hex); - StacksTransaction::consensus_deserialize(&mut cursor) - .expect("Failed to deserialize transaction") - .txid() - }; - let args = params![ - txid, - origin_addr_str, - origin_nonce, - origin_addr_str, - origin_nonce, - fee, - length, - consensus_hash, - block_header_hash, - height, - Utc::now().timestamp(), - tx_hex, - fee_rate - ]; - db_tx - .execute(sql, args) - .expect("Failed to insert transaction into mempool"); -} - #[test] #[ignore] /// This test intends to check the timing of the mempool iteration when there diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 68b8474efb3..e805df59cdd 100644 
--- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -42,6 +42,10 @@ use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; +use stacks::core::util::{ + make_contract_call, make_contract_publish, make_contract_publish_microblock_only, + make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, +}; use stacks::core::{ self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, @@ -78,11 +82,7 @@ use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; use super::bitcoin_regtest::BitcoinCoreController; -use super::{ - make_contract_call, make_contract_publish, make_contract_publish_microblock_only, - make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, - SK_2, SK_3, -}; +use super::{ADDR_4, SK_1, SK_2, SK_3}; use crate::burnchains::bitcoin_regtest_controller::{self, addr2str, BitcoinRPCRequest, UTXO}; use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 960f4e37ac2..474ec414661 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -14,7 +14,6 @@ // along with this program. If not, see . use std::collections::{HashMap, HashSet}; -use std::io::Cursor; use std::ops::Add; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -22,7 +21,6 @@ use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use std::{env, thread}; -use chrono::Utc; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, PeerInfo, RejectCode, @@ -31,12 +29,11 @@ use libsigner::v0::messages::{ use libsigner::{ BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING, }; -use rusqlite::{params, Connection, Transaction}; +use rusqlite::Connection; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::LeaderBlockCommitOp; -use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; @@ -47,6 +44,9 @@ use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, Transacti use stacks::codec::StacksMessageCodec; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig}; use stacks::core::mempool::MemPoolWalkStrategy; +use stacks::core::util::{ + insert_tx_in_mempool, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; @@ -94,18 +94,16 @@ use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ - boot_to_epoch_25, 
boot_to_epoch_3_reward_set, insert_tx_in_mempool, next_block_and, - next_block_and_controller, next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, - wait_for, POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, + boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, + next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, + POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{ - self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, -}; +use crate::tests::{self, gen_random_port}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -161,7 +159,7 @@ impl SignerTest { .to_rsv(); let signer_pk = StacksPublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -4742,7 +4740,7 @@ fn signer_set_rollover() { .to_rsv(); let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( stacker_sk, 0, 1000, @@ -9231,7 +9229,7 @@ fn injected_signatures_are_ignored_across_boundaries() { .to_rsv(); let signer_pk = Secp256k1PublicKey::from_private(&new_signer_private_key); - let stacking_tx = tests::make_contract_call( + let stacking_tx = make_contract_call( &new_signer_private_key, 0, 1000, From 031a8242fe76a1e3f3aa622c0bb2a10692a2dedc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 14 Mar 2025 17:05:20 -0400 Subject: [PATCH 132/238] test: add test for mempool walk with large mempool --- stackslib/src/core/tests/mod.rs | 86 +++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 35ef5edd298..fefd1ef910c 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -39,6 +39,7 @@ use stacks_common::util::secp256k1::{MessageSignature, *}; use stacks_common::util::vrf::VRFProof; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log, sleep_ms}; +use super::mempool::MemPoolWalkStrategy; use super::MemPoolDB; use crate::burnchains::{Address, Txid}; use crate::chainstate::burn::ConsensusHash; @@ -2778,3 +2779,88 @@ fn test_filter_txs_by_type() { }, ); } + +#[test] +fn large_mempool() { + let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); + let chainstate_path = chainstate_path(function_name!()); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + + let mut senders = (0..1024) + .map(|_| (StacksPrivateKey::random(), 0)) + .collect::>(); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let b = make_block( + &mut chainstate, + ConsensusHash([0x2; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 2, + 2, + ); + let block_height = 10; + + println!("Adding transactions to mempool"); + let mempool_tx = mempool.tx_begin().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = to_addr(sender_sk); + let fee = thread_rng().gen_range(180..2000); + let transfer_tx = + 
make_stacks_transfer(sender_sk, *nonce, fee, 0x80000000, &recipient, 1); + insert_tx_in_mempool( + &mempool_tx, + transfer_tx, + &sender_addr, + *nonce, + fee, + &ConsensusHash([0x2; 20]), + &FIRST_STACKS_BLOCK_HASH, + block_height, + ); + *nonce += 1; + } + } + mempool_tx.commit().unwrap(); + + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + let mut tx_events = Vec::new(); + + println!("Iterating mempool"); + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b.0, &b.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + available_tx.tx.metadata.tx_fee, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::ZERO, + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + // It should be able to iterate through at least 10000 transactions in 5s + assert!(count_txs > 10000); + }, + ); +} From 97c380772e5673f04b8218e4d704eb46c38a9d85 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 14 Mar 2025 17:35:52 -0400 Subject: [PATCH 133/238] chore: remove unused import --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index e60ea66e74e..1bc23ed955a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use std::collections::{BTreeMap, HashMap, HashSet}; -use std::fs::{self, File}; +use std::fs::File; use std::ops::RangeBounds; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; From 7f9380667049f10a6607755933fa5a46b59a25a2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 15 Mar 2025 09:30:18 -0400 Subject: [PATCH 134/238] test: fix `large_mempool_random_fee` --- testnet/stacks-node/src/tests/signer/v0.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 83f7a8cbbbf..7966bd09ef0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12753,6 +12753,7 @@ fn large_mempool_random_fee() { |_| {}, |conf| { conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; }, None, None, From 36f2cffb2ef14ffc6a62c7495a7bb9749ba67ee1 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 15 Mar 2025 12:59:36 -0400 Subject: [PATCH 135/238] chore: adjust `LIMIT` in mempool query 11650 is the maximum number of transactions possible in one block. 
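This bound appears to come from dividing the 2 MB block size limit by a
~180-byte minimum transaction size: 2,097,152 / 180 ≈ 11,650.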
---
 stackslib/src/core/mempool.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs
index 68d00303922..05147ddede8 100644
--- a/stackslib/src/core/mempool.rs
+++ b/stackslib/src/core/mempool.rs
@@ -1563,8 +1563,8 @@ impl MemPoolDB {
             LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address
             WHERE (no.address IS NULL OR m.origin_nonce = no.nonce)
               AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce)
-            ORDER BY fee_rate DESC
-            LIMIT 1024
+            ORDER BY accept_time ASC
+            LIMIT 11650 -- max transactions that can fit in one block
         ),
         address_nonce_ranked AS (
             SELECT *,

From 3c7769dc40af049ac81209ff8fad515fa0d28ffb Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Mon, 17 Mar 2025 14:10:41 +0100
Subject: [PATCH 136/238] prepare for integration test

---
 testnet/stacks-node/src/tests/signer/v0.rs | 230 +++++++++++++++++++++
 1 file changed, 230 insertions(+)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index edfdd5f7de6..61ce7d42fc3 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -766,6 +766,63 @@ impl MultipleMinerTest {
         Ok(txid)
     }
 
+    pub fn send_contract_publish(&mut self, contract_name: &str, contract_src: &str) -> String {
+        let http_origin = format!(
+            "http://{}",
+            &self.signer_test.running_nodes.conf.node.rpc_bind
+        );
+        let contract_tx = make_contract_publish(
+            &self.sender_sk,
+            self.sender_nonce,
+            self.send_fee,
+            self.signer_test.running_nodes.conf.burnchain.chain_id,
+            contract_name,
+            contract_src,
+        );
+        self.sender_nonce += 1;
+        submit_tx(&http_origin, &contract_tx)
+    }
+
+    /// Sends a contract publish tx to the stacks node and waits for the stacks node to mine it.
+    /// Returns the txid of the contract publish tx.
+    pub fn send_and_mine_contract_publish(
+        &mut self,
+        contract_name: &str,
+        contract_src: &str,
+        timeout_secs: u64,
+    ) -> Result<String, String> {
+        let stacks_height_before = self.get_peer_stacks_tip_height();
+        let txid = self.send_contract_publish(contract_name, contract_src);
+        wait_for(timeout_secs, || {
+            Ok(self.get_peer_stacks_tip_height() > stacks_height_before)
+        })?;
+        Ok(txid)
+    }
+
+    pub fn send_contract_call(
+        &mut self,
+        contract_name: &str,
+        function_name: &str,
+        function_args: &[clarity::vm::Value],
+    ) -> String {
+        let http_origin = format!(
+            "http://{}",
+            &self.signer_test.running_nodes.conf.node.rpc_bind
+        );
+        let contract_tx = make_contract_call(
+            &self.sender_sk,
+            self.sender_nonce,
+            self.send_fee,
+            self.signer_test.running_nodes.conf.burnchain.chain_id,
+            &tests::to_addr(&self.sender_sk),
+            contract_name,
+            function_name,
+            function_args,
+        );
+        self.sender_nonce += 1;
+        submit_tx(&http_origin, &contract_tx)
+    }
+
     /// Return the Peer Info from node 1
     pub fn get_peer_info(&self) -> PeerInfo {
         self.signer_test.get_peer_info()
@@ -935,6 +992,23 @@ fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool {
     }
 }
 
+/// Returns whether the last block in the test observer contains a transaction
+/// with the given txid.
+fn last_block_contains_txid(txid: &str) -> bool {
+    let blocks = test_observer::get_blocks();
+    let last_block = &blocks.last().unwrap();
+    let transactions = last_block["transactions"].as_array().unwrap();
+    for tx in transactions {
+        let raw_tx = tx["raw_tx"].as_str().unwrap();
+        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+        if parsed.txid().to_string() == txid {
+            return true;
+        }
+    }
+    false
+}
+
 /// Asserts that the last block in the test observer contains a tenure change with the given cause.
 fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) {
     assert!(last_block_contains_tenure_change_tx(cause));
@@ -12438,3 +12512,159 @@ fn signer_can_accept_rejected_block() {
 
     signer_test.shutdown();
 }
+
+/// Test a scenario where:
+/// Two miners boot to Nakamoto.
+/// Miner 1 is configured with `max_execution_time = Some(0)`, so any contract call
+/// exhausts its execution time budget; Miner 2 has no execution time limit.
+/// Miner 1 wins a sortition and mines a tenure change block.
+/// A dummy contract is deployed and a call to it is submitted to the mempool.
+/// Miner 1 must skip the contract call because its execution time budget expires,
+/// so the call does not appear in the block Miner 1 mines.
+/// Miner 2 wins the next tenure and, with no execution time limit, is able to
+/// include the contract call.
+#[test]
+#[ignore]
+fn miner_rejection_by_contract_call_execution_time_expired() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let num_txs = 3;
+
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        num_txs,
+        |signer_config| {
+            // Let's make sure we never time out since we need to stall some things to force our scenario
+            signer_config.block_proposal_validation_timeout = Duration::from_secs(1800);
+            signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800);
+            signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800);
+        },
+        |config| config.miner.max_execution_time = Some(0),
+        |config| config.miner.max_execution_time = None,
+    );
+    let rl1_skip_commit_op = miners
+        .signer_test
+        .running_nodes
+        .counters
+        .naka_skip_commit_op
+        .clone();
+    let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone();
+
+    let (conf_1, _) = miners.get_node_configs();
+    let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes();
+    let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
+
+    miners.boot_to_epoch_3();
+
+    let burnchain = conf_1.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    info!("------------------------- Pause Miner 1's Block Commits -------------------------");
+    rl1_skip_commit_op.set(true);
+
+    // The dummy contract to deploy: its public function is trivial, but any call
+    // to it still exceeds miner 1's execution time budget of zero
+    let dummy_contract_src = "
+       (define-public (run-f)
+        (ok u1))
+    ";
+
+    info!("------------------------- Miner 1 Mines a Nakamoto Block N -------------------------");
+    miners
+        .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60)
+        .expect("Failed to mine BTC block followed by Block N");
+
+    // First, let's deploy the contract
+    let _contract_publish_txid =
+        miners.send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60);
+
+    let stacks_height_before = miners.get_peer_stacks_tip_height();
+
+    // Try calling it (miner 1 has to skip the call)
+    let contract_call_txid = miners.send_contract_call("dummy-contract", "run-f", &[]);
+
+    let miner_1_block_n =
+        wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_1)
+            .expect("Failed to get block N");
+    let block_n_height = miner_1_block_n.header.chain_length;
+
+    assert_eq!(last_block_contains_txid(&contract_call_txid), false);
+
+    // assure we have a successful sortition that miner 1 won
+    verify_sortition_winner(&sortdb, &miner_pkh_1);
+
+    info!("------------------------- Miner 2 Submits a Block Commit -------------------------");
+    miners.submit_commit_miner_2(&sortdb);
+
+    info!("------------------------- Pause Miner 2's Block Mining -------------------------");
+    TEST_MINE_STALL.set(true);
+
+    info!("------------------------- Mine Tenure -------------------------");
+    miners
+        .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60)
+        .expect("Failed to mine BTC block");
+
+    info!("------------------------- Miner 1 Submits a Block Commit -------------------------");
+    miners.submit_commit_miner_1(&sortdb);
+
+    info!("------------------------- Miner 2 Mines Block N+1 -------------------------");
+
+    TEST_MINE_STALL.set(false);
+    let _ = wait_for_block_pushed_by_miner_key(30, block_n_height + 1, &miner_pk_2)
+        .expect("Failed to get block N+1");
+
+    // assure we have a successful sortition that miner 2 won
+    verify_sortition_winner(&sortdb, &miner_pkh_2);
+
+    assert_eq!(
+        get_chain_info(&conf_1).stacks_tip_height,
+        block_n_height + 1
+    );
+
+    info!("------------------------- Miner 2 Mines N+2 and N+3 -------------------------");
+    miners
+        .send_and_mine_transfer_tx(30)
+        .expect("Failed to send and mine transfer tx");
+    miners
+        .send_and_mine_transfer_tx(30)
+        .expect("Failed to send and mine transfer tx");
+    assert_eq!(
+        get_chain_info(&conf_1).stacks_tip_height,
+        block_n_height + 3
+    );
+
+    info!("------------------------- Miner 1 Wins the Next Tenure, Mines N+4 -------------------------");
+    miners.btc_regtest_controller_mut().build_next_block(1);
+
+    let _ = wait_for_block_pushed_by_miner_key(30, block_n_height + 4, &miner_pk_1)
+        .expect("Failed to get block N+4");
+
+    // Check that miner 1 never mined the contract call: after the boot sequence it
+    // should have produced exactly one block signed with its key
+    let miner1_blocks_after_boot_to_epoch3 = get_nakamoto_headers(&conf_1)
+        .into_iter()
+        .filter(|block| {
+            // skip first nakamoto block
+            if block.stacks_block_height == stacks_height_before {
+                return false;
+            }
+            let nakamoto_block_header = block.anchored_header.as_stacks_nakamoto().unwrap();
+            miner_pk_1
+                .verify(
+                    nakamoto_block_header.miner_signature_hash().as_bytes(),
+                    &nakamoto_block_header.miner_signature,
+                )
+                .unwrap()
+        })
+        .count();
+
+    assert_eq!(miner1_blocks_after_boot_to_epoch3, 1);
+
+    info!("------------------------- Shutdown -------------------------");
+    miners.shutdown();
+}

From fcaf8c569160d48e7d1c776f9e0375401f39d694 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Mon, 17 Mar 2025 09:27:44 -0400
Subject: [PATCH 137/238] test: add `tests::signer::v0::larger_mempool`

---
 .github/workflows/bitcoin-tests.yml        |   3 +-
 testnet/stacks-node/src/tests/signer/v0.rs | 264 +++++++++++++++++++++
 2 files changed, 266 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml
index 1e54a43e1a7..169ba7272a8 100644
--- a/.github/workflows/bitcoin-tests.yml
+++ b/.github/workflows/bitcoin-tests.yml
@@ -123,10 +123,11 @@ jobs:
           - test-name: tests::epoch_24::verify_auto_unlock_behavior
           # Disable this flaky test. We don't need continue testing Epoch 2 -> 3 transition
           - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY
-          # This test takes a long time to run, and is meant to be run manually
+          # These mempool tests take a long time to run, and are meant to be run manually
           - test-name: tests::nakamoto_integrations::large_mempool
           - test-name: tests::nakamoto_integrations::large_mempool_random_fee
           - test-name: tests::nakamoto_integrations::larger_mempool
+          - test-name: tests::signer::v0::larger_mempool
 
     steps:
       ## Setup test environment

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 7966bd09ef0..322b3a00e53 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -12956,3 +12956,267 @@ fn large_mempool_random_fee() {
 
     signer_test.shutdown();
 }
+
+#[test]
+#[ignore]
+/// This test intends to check the timing of the mempool iteration when there
+/// are a large number of transactions in the mempool. It will boot to epoch 3,
+/// fan out some STX transfers to a large number of accounts, wait for these to
+/// all be mined, and then pause block mining, and submit a large number of
+/// transactions to the mempool from those accounts, all with the same fee. It
+/// will then unpause block mining and wait for the first block to be mined.
+/// Since the default miner configuration specifies to spend 5 seconds mining a
+/// block, we expect that this first block should be proposed within 10 seconds
+/// and approved within 20 seconds. We then log the number of transactions in
+/// that first block and wait for all of the submitted transfers to be mined.
+fn larger_mempool() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let transfer_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    // Start with 10 accounts with initial balances.
+    let initial_sender_sks = (0..10)
+        .map(|_| StacksPrivateKey::random())
+        .collect::<Vec<_>>();
+    let initial_sender_addrs = initial_sender_sks
+        .iter()
+        .map(|sk| tests::to_addr(sk))
+        .collect::<Vec<_>>();
+
+    // These 10 accounts will send to 25 accounts each, then those 260 accounts
+    // will send to 25 accounts each, for a total of 6760 accounts.
+    // At the end of the funding round, we want to have 6760 accounts with
+    // enough balance to send 1 uSTX 25 times in each of 10 rounds of sends.
+    // With a budget of 2001 uSTX per send, we need each account to end up with
+    // 2001 * 25 * 10 = 500_250 uSTX.
+    // The 260 accounts in the middle will need to have
+    // (500250 + 180) * 26 = 13_011_180 uSTX.
+ // The 10 initial accounts will need to have + // (13011180 + 180) * 26 = 338_295_360 uSTX. + let initial_balance = 338_295_360; + let initial_balances = initial_sender_addrs + .iter() + .map(|addr| (addr.clone(), initial_balance)) + .collect::>(); + + let num_signers = 5; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |conf| { + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + }, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + // This will hold tuples for all of our senders, with the sender pk and + // the nonce + let mut senders = initial_sender_sks + .iter() + .map(|sk| (sk, 0)) + .collect::>(); + + let mempool_db_path = format!( + "{}/nakamoto-neon/chainstate/mempool.sqlite", + signer_test.running_nodes.conf.node.working_dir + ); + let chain_id = signer_test.running_nodes.conf.burnchain.chain_id; + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // Open a sqlite DB at mempool_db_path so that we can quickly add + // transactions to the mempool. + let mut conn = Connection::open(&mempool_db_path).unwrap(); + let db_tx = conn.transaction().unwrap(); + + info!("Sending the first round of funding"); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 13_011_180, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending first round of funding took {:?}", timer.elapsed()); + + // Wait for the first round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of funding to be mined"); + + info!( + "Sending and mining first round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Sending the second round of funding"); + let db_tx = conn.transaction().unwrap(); + let timer = Instant::now(); + let mut new_senders = vec![]; + for (sender_sk, nonce) in senders.iter_mut() { + for _ in 0..25 { + let sender_addr = tests::to_addr(sender_sk); + let recipient_sk = StacksPrivateKey::random(); + let recipient_addr = tests::to_addr(&recipient_sk); + let transfer_tx = make_stacks_transfer( + sender_sk, + *nonce, + transfer_fee, + chain_id, + &recipient_addr.into(), + 500_250, + ); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + 
tip.stacks_block_height, + ); + *nonce += 1; + new_senders.push(recipient_sk); + } + } + db_tx.commit().unwrap(); + + info!("Sending second round of funding took {:?}", timer.elapsed()); + + // Wait for the second round of funding to be mined + wait_for(120, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for second round of funding to be mined"); + + info!( + "Sending and mining second round of funding took {:?}", + timer.elapsed() + ); + + // Add the new senders to the list of senders + senders.extend(new_senders.iter().map(|sk| (sk, 0))); + + info!("Pause mining and fill the mempool with the transfers"); + + // Pause block mining + TEST_MINE_STALL.set(true); + + let timer = Instant::now(); + + // Fill the mempool with the transfers + for _ in 0..10 { + let db_tx = conn.transaction().unwrap(); + for _ in 0..25 { + for (sender_sk, nonce) in senders.iter_mut() { + let sender_addr = tests::to_addr(sender_sk); + let transfer_tx = + make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + insert_tx_in_mempool( + &db_tx, + transfer_tx, + &sender_addr, + *nonce, + transfer_fee, + &tip.consensus_hash, + &tip.canonical_stacks_tip_hash, + tip.stacks_block_height, + ); + *nonce += 1; + } + } + db_tx.commit().unwrap(); + } + + info!("Sending transfers took {:?}", timer.elapsed()); + + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + info!("Mining transfers..."); + + // Unpause block mining + TEST_MINE_STALL.set(false); + + // Wait for the first block to be proposed. + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) + }) + .expect("Timed out waiting for first block to be mined"); + + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); + info!( + "First block contains {} transactions", + last_block.tx_events.len() + ); + + // Wait for the first round of transfers to all be mined + wait_for(43200, || { + for (sender_sk, nonce) in senders.iter() { + let sender_addr = tests::to_addr(sender_sk); + let account = get_account(&http_origin, &sender_addr); + if account.nonce < *nonce { + return Ok(false); + } + } + Ok(true) + }) + .expect("Timed out waiting for first round of transfers to be mined"); + + info!("Mining first round of transfers took {:?}", timer.elapsed()); + signer_test.shutdown(); +} From 0b25c2cd17389d9864bd29921c46c79a75e050fb Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Mar 2025 10:01:15 -0400 Subject: [PATCH 138/238] chore: fix merge conflict --- stackslib/src/chainstate/stacks/tests/block_construction.rs | 1 - stackslib/src/core/tests/mod.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index b21d88823e9..472d71a2343 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5221,7 +5221,6 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], diff --git 
a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index d1432481d46..f964d552cbb 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -2832,7 +2832,6 @@ fn large_mempool() { // Generate any success result TransactionResult::success( &available_tx.tx.tx, - available_tx.tx.metadata.tx_fee, StacksTransactionReceipt::from_stx_transfer( available_tx.tx.tx.clone(), vec![], From 38b84643325e01480c342e7f1af1629af542f762 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Mar 2025 11:14:40 -0400 Subject: [PATCH 139/238] chore: put candidate cache back for `GlobalFeeRate` strategy --- stackslib/src/core/mempool.rs | 139 +++++++++++++++++++++++++++++----- 1 file changed, 118 insertions(+), 21 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 05147ddede8..b253bd901f6 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1060,6 +1060,76 @@ pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_ Ok(ret) } +/// Cache potential candidate transactions for subsequent iterations. +/// While walking the mempool, transactions that have nonces that are too high +/// to process yet (but could be processed in the future) are added to `next`. +/// In the next pass, `next` is moved to `cache` and these transactions are +/// checked before reading more from the mempool DB. +struct CandidateCache { + cache: VecDeque, + next: VecDeque, + /// The maximum size that this cache can be. + max_cache_size: usize, +} + +impl CandidateCache { + fn new(candidate_retry_cache_size: usize) -> Self { + let max_size: usize = candidate_retry_cache_size + .try_into() + .expect("Could not cast `candidate_retry_cache_size` as usize."); + Self { + cache: VecDeque::new(), + next: VecDeque::new(), + max_cache_size: max_size, + } + } + + /// Retrieve the next candidate transaction from the cache. + fn next(&mut self) -> Option { + self.cache.pop_front() + } + + /// Push a candidate to the cache for the next iteration. + fn push(&mut self, tx: MemPoolTxInfoPartial) { + if self.next.len() < self.max_cache_size { + self.next.push_back(tx); + } + + #[cfg(test)] + assert!(self.cache.len() + self.next.len() <= self.max_cache_size); + } + + /// Prepare for the next iteration, transferring transactions from `next` to `cache`. + fn reset(&mut self) { + // We do not need a size check here, because the cache can only grow in size + // after `cache` is empty. New transactions are not walked until the entire + // cache has been walked, so whenever we are adding brand new transactions to + // the cache, `cache` must, by definition, be empty. The size of `next` + // can grow beyond the previous iteration's cache, and that is limited inside + // the `push` method. + self.next.append(&mut self.cache); + self.cache = std::mem::take(&mut self.next); + + #[cfg(test)] + { + assert!(self.cache.len() <= self.max_cache_size + 1); + assert!(self.next.len() <= self.max_cache_size + 1); + } + } + + /// Total length of the cache. + #[cfg_attr(test, mutants::skip)] + fn len(&self) -> usize { + self.cache.len() + self.next.len() + } + + /// Is the cache empty? + #[cfg_attr(test, mutants::skip)] + fn is_empty(&self) -> bool { + self.cache.is_empty() && self.next.is_empty() + } +} + /// Evaluates the pair of nonces, to determine an order /// /// Returns: @@ -1510,12 +1580,13 @@ impl MemPoolDB { // consideration. 
let tx_consideration_sampler = Uniform::new(0, 100); let mut rng = rand::thread_rng(); + let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM mempool WHERE fee_rate IS NULL "; - let mut query_stmt_null = self.db.prepare(&sql).map_err(Error::SqliteError)?; + let mut query_stmt_null = self.db.prepare(sql).map_err(Error::SqliteError)?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) .map_err(Error::SqliteError)?; @@ -1525,7 +1596,7 @@ impl MemPoolDB { WHERE fee_rate IS NOT NULL ORDER BY fee_rate DESC "; - let mut query_stmt_fee = self.db.prepare(&sql).map_err(Error::SqliteError)?; + let mut query_stmt_fee = self.db.prepare(sql).map_err(Error::SqliteError)?; let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) .map_err(Error::SqliteError)?; @@ -1582,7 +1653,7 @@ impl MemPoolDB { FROM address_nonce_ranked ORDER BY origin_rank ASC, sponsor_rank ASC, sort_fee_rate DESC "; - let mut query_stmt_nonce_rank = self.db.prepare(&sql).map_err(Error::SqliteError)?; + let mut query_stmt_nonce_rank = self.db.prepare(sql).map_err(Error::SqliteError)?; let mut nonce_rank_iterator = query_stmt_nonce_rank .query(NO_PARAMS) .map_err(Error::SqliteError)?; @@ -1597,31 +1668,44 @@ impl MemPoolDB { // First, try to read from the retry list let (candidate, update_estimate) = match settings.strategy { MemPoolWalkStrategy::GlobalFeeRate => { - let start_with_no_estimate = tx_consideration_sampler.sample(&mut rng) - < settings.consider_no_estimate_tx_prob; - // randomly select from either the null fee-rate transactions or those with fee-rate estimates. - let opt_tx = if start_with_no_estimate { - null_iterator.next().map_err(Error::SqliteError)? - } else { - fee_iterator.next().map_err(Error::SqliteError)? - }; - match opt_tx { - Some(row) => { - (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate) + // First, try to read from the retry list + match candidate_cache.next() { + Some(tx) => { + let update_estimate = tx.fee_rate.is_none(); + (tx, update_estimate) } None => { - // If the selected iterator is empty, check the other - match if start_with_no_estimate { - fee_iterator.next().map_err(Error::SqliteError)? - } else { + // When the retry list is empty, read from the mempool db, + // randomly selecting from either the null fee-rate transactions + // or those with fee-rate estimates. + let start_with_no_estimate = tx_consideration_sampler + .sample(&mut rng) + < settings.consider_no_estimate_tx_prob; + let opt_tx = if start_with_no_estimate { null_iterator.next().map_err(Error::SqliteError)? - } { + } else { + fee_iterator.next().map_err(Error::SqliteError)? + }; + match opt_tx { Some(row) => ( MemPoolTxInfoPartial::from_row(row)?, - !start_with_no_estimate, + start_with_no_estimate, ), None => { - break MempoolIterationStopReason::NoMoreCandidates; + // If the selected iterator is empty, check the other + match if start_with_no_estimate { + fee_iterator.next().map_err(Error::SqliteError)? + } else { + null_iterator.next().map_err(Error::SqliteError)? 
+ } { + Some(row) => ( + MemPoolTxInfoPartial::from_row(row)?, + !start_with_no_estimate, + ), + None => { + break MempoolIterationStopReason::NoMoreCandidates; + } + } } } } @@ -1679,6 +1763,10 @@ impl MemPoolDB { "expected_origin_nonce" => expected_origin_nonce, "expected_sponsor_nonce" => expected_sponsor_nonce, ); + if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { + // This transaction could become runnable in this pass, save it for later + candidate_cache.push(candidate); + } continue; } Ordering::Equal => { @@ -1804,6 +1892,15 @@ impl MemPoolDB { // query. let mut nonce_conn = self.reopen(true)?; nonce_cache.flush(&mut nonce_conn); + + if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { + // Reset for finding the next transaction to process + debug!( + "Mempool: reset: retry list has {} entries", + candidate_cache.len() + ); + candidate_cache.reset(); + } }; // drop these rusqlite statements and queries, since their existence as immutable borrows on the From 09a34091110c7c6fad2750a73c816b36fd561bf5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 12 Mar 2025 15:14:19 -0500 Subject: [PATCH 140/238] feat: initial implementation of signer state machine --- libsigner/src/v0/messages.rs | 2 +- stacks-signer/Cargo.toml | 1 + stacks-signer/src/chainstate.rs | 8 +- stacks-signer/src/client/stacks_client.rs | 8 +- stacks-signer/src/lib.rs | 5 +- stacks-signer/src/monitoring/mod.rs | 13 + stacks-signer/src/monitoring/prometheus.rs | 6 + stacks-signer/src/runloop.rs | 19 +- stacks-signer/src/v0/mod.rs | 2 + stacks-signer/src/v0/signer.rs | 78 +++-- stacks-signer/src/v0/signer_state.rs | 309 ++++++++++++++++++++ testnet/stacks-node/src/tests/signer/mod.rs | 292 +++++++++++++++++- testnet/stacks-node/src/tests/signer/v0.rs | 9 + 13 files changed, 711 insertions(+), 41 deletions(-) create mode 100644 stacks-signer/src/v0/signer_state.rs diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index e9234cf5e4c..fac89767dc1 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -274,7 +274,7 @@ pub struct PeerInfo { pub stacks_tip: BlockHeaderHash, /// The stacks tip height pub stacks_tip_height: u64, - /// The pox consensus + /// The consensus hash of the current burnchain tip pub pox_consensus: ConsensusHash, /// The server version pub server_version: String, diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index eb58164a6e6..19687e0c09e 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -61,5 +61,6 @@ version = "0.24.3" features = ["serde", "recovery"] [features] +default = [] monitoring_prom = ["libsigner/monitoring_prom", "prometheus", "tiny_http"] testing = [] diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 8654450738a..ad36730d0c7 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -39,6 +39,9 @@ pub enum SignerChainstateError { /// Error resulting from crate::client interactions #[error("Client error: {0}")] ClientError(#[from] ClientError), + /// The signer could not find information about the parent tenure + #[error("No information available for parent tenure '{0}'")] + NoParentTenureInfo(ConsensusHash), } impl From for RejectReason { @@ -418,7 +421,10 @@ impl SortitionsView { Ok(()) } - fn check_parent_tenure_choice( + /// Check if the tenure defined by `sortition_state` is building off of an + /// appropriate tenure. 
Note that this does not check that it confirms the correct + /// number of blocks from that tenure! + pub fn check_parent_tenure_choice( sortition_state: &SortitionState, block: &NakamotoBlock, signer_db: &SignerDb, diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index db0b356fb40..183551c23f9 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -168,17 +168,17 @@ impl StacksClient { &self.stacks_address } - /// Get the stacks tip header of the tenure given its consensus hash + /// Get the header of the highest known block in the given tenure pub fn get_tenure_tip( &self, - consensus_hash: &ConsensusHash, + tenure_id: &ConsensusHash, ) -> Result { debug!("StacksClient: Getting tenure tip"; - "consensus_hash" => %consensus_hash, + "consensus_hash" => %tenure_id, ); let send_request = || { self.stacks_node_client - .get(self.tenure_tip_path(consensus_hash)) + .get(self.tenure_tip_path(tenure_id)) .send() .map_err(|e| { warn!("Signer failed to request latest sortition"; "err" => ?e); diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9f2df125341..7c646f69fcb 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -54,6 +54,7 @@ use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait, VERSION_STRI use runloop::SignerResult; use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; +use v0::signer_state::LocalStateMachine; use crate::client::StacksClient; use crate::config::SignerConfig; @@ -62,7 +63,7 @@ use crate::runloop::RunLoop; /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { /// Create a new `Signer` instance - fn new(config: SignerConfig) -> Self; + fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self; /// Get the reward cycle of the signer fn reward_cycle(&self) -> u64; /// Process an event @@ -76,6 +77,8 @@ pub trait Signer: Debug + Display { ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; + /// Get a reference to the local state machine of the signer + fn get_local_state_machine(&self) -> &LocalStateMachine; } /// A wrapper around the running signer type for the signer diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 60a530acabd..1c10807af2e 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -30,6 +30,7 @@ pub mod actions { use crate::config::GlobalConfig; use crate::monitoring::prometheus::*; + use crate::v0::signer_state::LocalStateMachine; /// Update stacks tip height gauge pub fn update_stacks_tip_height(height: i64) { @@ -100,6 +101,14 @@ pub mod actions { .observe(latency_ms as f64 / 1000.0); } + /// Record the current local state machine + pub fn record_local_state(state: LocalStateMachine) { + SIGNER_LOCAL_STATE_MACHINE + .lock() + .expect("Local state machine lock poisoned") + .replace(state); + } + /// Start serving monitoring metrics. /// This will only serve the metrics if the `monitoring_prom` feature is enabled. 
pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> {
@@ -124,6 +133,7 @@ pub mod actions {
     use slog::slog_info;
     use stacks_common::info;
 
+    use crate::v0::signer_state::LocalStateMachine;
     use crate::GlobalConfig;
 
     /// Update stacks tip height gauge
@@ -168,6 +178,9 @@ pub mod actions {
     /// Record the time taken to validate a block, as reported by the Stacks node.
     pub fn record_block_validation_latency(_latency_ms: u64) {}
 
+    /// Record the current local state machine
+    pub fn record_local_state(_state: LocalStateMachine) {}
+
     /// Start serving monitoring metrics.
     /// This will only serve the metrics if the `monitoring_prom` feature is enabled.
     pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), String> {
diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs
index 49f74ba1e88..048edb051d7 100644
--- a/stacks-signer/src/monitoring/prometheus.rs
+++ b/stacks-signer/src/monitoring/prometheus.rs
@@ -14,6 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
+use std::sync::Mutex;
+
 use lazy_static::lazy_static;
 use prometheus::{
     gather, histogram_opts, opts, register_histogram_vec, register_int_counter,
@@ -21,6 +23,8 @@
     IntGauge, TextEncoder,
 };
 
+use crate::v0::signer_state::LocalStateMachine;
+
 lazy_static! {
     pub static ref STACKS_TIP_HEIGHT_GAUGE: IntGauge = register_int_gauge!(opts!(
         "stacks_signer_stacks_node_height",
@@ -74,6 +78,8 @@ lazy_static! {
         "Time (seconds) measuring end-to-end time to respond to a block",
         vec![0.005, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 20.0, 30.0, 60.0, 120.0]
     ), &[]).unwrap();
+
+    pub static ref SIGNER_LOCAL_STATE_MACHINE: Mutex<Option<LocalStateMachine>> = Mutex::new(None);
 }
 
 pub fn gather_metrics_string() -> String {
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index f8bb4acac99..2d3bf12e989 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -26,6 +26,7 @@ use stacks_common::{debug, error, info, warn};
 use crate::chainstate::SortitionsView;
 use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient};
 use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode};
+use crate::v0::signer_state::LocalStateMachine;
 #[cfg(any(test, feature = "testing"))]
 use crate::v0::tests::TEST_SKIP_SIGNER_CLEANUP;
 use crate::Signer as SignerTrait;
@@ -53,6 +54,9 @@ pub struct StateInfo {
     pub reward_cycle_info: Option<RewardCycleInfo>,
     /// The current running signers reward cycles
     pub running_signers: Vec<u64>,
+    /// The local state machines for the running signers
+    /// as a pair of (reward-cycle, state-machine)
+    pub signer_state_machines: Vec<(u64, Option<LocalStateMachine>)>,
 }
 
 /// The signer result that can be sent across threads
@@ -326,7 +330,7 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop<Signer, T> {
         let new_signer_config = match self.get_signer_config(reward_cycle) {
             Ok(Some(new_signer_config)) => {
                 let signer_mode = new_signer_config.signer_mode.clone();
-                let new_signer = Signer::new(new_signer_config);
+                let new_signer = Signer::new(&self.stacks_client, new_signer_config);
                 info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as {signer_mode}.
Initialized signer state."); ConfiguredSigner::RegisteredSigner(new_signer) } @@ -507,6 +511,19 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> .values() .map(|s| s.reward_cycle()) .collect(), + signer_state_machines: self + .stacks_signers + .iter() + .map(|(reward_cycle, signer)| { + let ConfiguredSigner::RegisteredSigner(ref signer) = signer else { + return (*reward_cycle, None); + }; + ( + *reward_cycle, + Some(signer.get_local_state_machine().clone()), + ) + }) + .collect(), } .into()]) { diff --git a/stacks-signer/src/v0/mod.rs b/stacks-signer/src/v0/mod.rs index 34b363311ec..f1484028219 100644 --- a/stacks-signer/src/v0/mod.rs +++ b/stacks-signer/src/v0/mod.rs @@ -16,6 +16,8 @@ /// The signer module for processing events pub mod signer; +/// The state machine for the signer view +pub mod signer_state; #[cfg(any(test, feature = "testing"))] /// Test specific functions for the signer module diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2decd05dc16..0bf5956aaed 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -46,6 +46,7 @@ use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error, info, warn}; +use super::signer_state::LocalStateMachine; use crate::chainstate::{ProposalEvalConfig, SortitionMinerStatus, SortitionsView}; use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use crate::config::{SignerConfig, SignerConfigMode}; @@ -107,6 +108,8 @@ pub struct Signer { pub submitted_block_proposal: Option<(Sha512Trunc256Sum, Instant)>, /// Maximum age of a block proposal in seconds before it is dropped without processing pub block_proposal_max_age_secs: u64, + /// The signer's local state machine used in signer set agreement + pub local_state_machine: LocalStateMachine, } impl std::fmt::Display for SignerMode { @@ -126,8 +129,40 @@ impl std::fmt::Display for Signer { impl SignerTrait for Signer { /// Create a new signer from the given configuration - fn new(config: SignerConfig) -> Self { - Self::from(config) + fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self { + let stackerdb = StackerDB::from(&signer_config); + let mode = match signer_config.signer_mode { + SignerConfigMode::DryRun => SignerMode::DryRun, + SignerConfigMode::Normal { signer_id, .. 
} => SignerMode::Normal { signer_id }, + }; + + debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); + + let signer_db = + SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); + let proposal_config = ProposalEvalConfig::from(&signer_config); + + let signer_state = LocalStateMachine::new(&signer_db, stacks_client, &proposal_config) + .unwrap_or_else(|e| { + warn!("Failed to initialize local state machine for signer: {e:?}"); + LocalStateMachine::Uninitialized + }); + Self { + private_key: signer_config.stacks_private_key, + stackerdb, + mainnet: signer_config.mainnet, + mode, + signer_addresses: signer_config.signer_entries.signer_addresses.clone(), + signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), + signer_slot_ids: signer_config.signer_slot_ids.clone(), + reward_cycle: signer_config.reward_cycle, + signer_db, + proposal_config, + submitted_block_proposal: None, + block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, + block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, + local_state_machine: signer_state, + } } /// Return the reward cycle of the signer @@ -177,6 +212,10 @@ impl SignerTrait for Signer { debug!("{self}: Signer reward cycle has not yet started. Ignoring event."); return; } + + self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config) + .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e)); + match event { SignerEvent::BlockValidationResponse(block_validate_response) => { debug!("{self}: Received a block proposal result from the stacks node..."); @@ -283,6 +322,9 @@ impl SignerTrait for Signer { ); panic!("{self} Failed to write burn block event to signerdb: {e}"); }); + self.local_state_machine + .bitcoin_block_arrival(&self.signer_db, stacks_client, &self.proposal_config, Some(*burn_height)) + .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest bitcoin block arrival"; "err" => ?e)); *sortition_state = None; } SignerEvent::NewBlock { @@ -324,37 +366,9 @@ impl SignerTrait for Signer { true }) } -} - -impl From for Signer { - fn from(signer_config: SignerConfig) -> Self { - let stackerdb = StackerDB::from(&signer_config); - let mode = match signer_config.signer_mode { - SignerConfigMode::DryRun => SignerMode::DryRun, - SignerConfigMode::Normal { signer_id, .. 
} => SignerMode::Normal { signer_id }, - }; - debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); - - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - let proposal_config = ProposalEvalConfig::from(&signer_config); - - Self { - private_key: signer_config.stacks_private_key, - stackerdb, - mainnet: signer_config.mainnet, - mode, - signer_addresses: signer_config.signer_entries.signer_addresses.clone(), - signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), - signer_slot_ids: signer_config.signer_slot_ids.clone(), - reward_cycle: signer_config.reward_cycle, - signer_db, - proposal_config, - submitted_block_proposal: None, - block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, - block_proposal_max_age_secs: signer_config.block_proposal_max_age_secs, - } + fn get_local_state_machine(&self) -> &LocalStateMachine { + &self.local_state_machine } } diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs new file mode 100644 index 00000000000..36702b10496 --- /dev/null +++ b/stacks-signer/src/v0/signer_state.rs @@ -0,0 +1,309 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::time::Duration; + +use blockstack_lib::chainstate::burn::ConsensusHashExtensions; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use serde::{Deserialize, Serialize}; +use slog::slog_warn; +use stacks_common::bitvec::BitVec; +use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::warn; + +use crate::chainstate::{ + ProposalEvalConfig, SignerChainstateError, SortitionState, SortitionsView, +}; +use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; +use crate::signerdb::SignerDb; + +/// A signer state machine view. This struct can +/// be used to encode the local signer's view or +/// the global view. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct SignerStateMachine { + /// The tip burn block (i.e., the latest bitcoin block) seen by this signer + pub burn_block: ConsensusHash, + /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer + pub burn_block_height: u64, + /// The signer's view of who the current miner should be (and their tenure building info) + pub current_miner: MinerState, + /// The active signing protocol version + pub active_signer_protocol_version: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +/// Enum for capturing the signer state machine's view of who +/// should be the active miner and what their tenure should be +/// built on top of. 
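+/// An active miner is the winner of the current sortition when that sortition
+/// builds on an approved parent tenure; otherwise the winner of the prior
+/// sortition may be treated as active instead (see
+/// `LocalStateMachine::bitcoin_block_arrival`).
+///
+/// # Example
+///
+/// An illustrative sketch (not part of the original patch) of how a consumer
+/// might branch on this view:
+///
+/// ```ignore
+/// match state_machine.current_miner {
+///     MinerState::ActiveMiner { current_miner_pkh, .. } => {
+///         // only consider block proposals from the miner whose key
+///         // hashes to `current_miner_pkh`
+///     }
+///     MinerState::NoValidMiner => {
+///         // this signer currently recognizes no valid miner at all
+///     }
+/// }
+/// ```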
+pub enum MinerState { + /// The information for the current active miner + ActiveMiner { + /// The pubkeyhash of the current miner's signing key + current_miner_pkh: Hash160, + /// The tenure that the current miner is building on top of + parent_tenure_id: ConsensusHash, + /// The last block of the parent tenure (which should be + /// the block that the next tenure starts from) + parent_tenure_last_block: StacksBlockId, + /// The height of the last block of the parent tenure (which should be + /// the block that the next tenure starts from) + parent_tenure_last_block_height: u64, + }, + /// This signer doesn't believe there's any valid miner + NoValidMiner, +} + +/// The local signer state machine +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum LocalStateMachine { + /// The local state machine couldn't be instantiated + Uninitialized, + /// The local state machine is instantiated + Initialized(SignerStateMachine), + /// The local state machine has a pending update + Pending { + /// The pending update + update: StateMachineUpdate, + /// The local state machine before the pending update + prior: SignerStateMachine, + }, +} + +/// A pending update for a signer state machine +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum StateMachineUpdate { + /// A new burn block at height u64 is expected + BurnBlock(u64), +} + +impl LocalStateMachine { + /// Initialize a local state machine by querying the local stacks-node + /// and signerdb for the current sortition information + pub fn new( + db: &SignerDb, + client: &StacksClient, + proposal_config: &ProposalEvalConfig, + ) -> Result { + let mut instance = Self::Uninitialized; + instance.bitcoin_block_arrival(db, client, proposal_config, None)?; + + Ok(instance) + } + + fn place_holder() -> SignerStateMachine { + SignerStateMachine { + burn_block: ConsensusHash::empty(), + burn_block_height: 0, + current_miner: MinerState::NoValidMiner, + active_signer_protocol_version: 1, + } + } + + /// If this local state machine has pending updates, process them + pub fn handle_pending_update( + &mut self, + db: &SignerDb, + client: &StacksClient, + proposal_config: &ProposalEvalConfig, + ) -> Result<(), SignerChainstateError> { + let LocalStateMachine::Pending { update, .. } = self else { + return Ok(()); + }; + match update.clone() { + StateMachineUpdate::BurnBlock(expected_burn_height) => { + self.bitcoin_block_arrival(db, client, proposal_config, Some(expected_burn_height)) + } + } + } + + fn make_miner_state( + sortition_to_set: SortitionState, + client: &StacksClient, + db: &SignerDb, + proposal_config: &ProposalEvalConfig, + ) -> Result { + let next_current_miner_pkh = sortition_to_set.miner_pkh; + let next_parent_tenure_id = sortition_to_set.parent_tenure_id; + + let stacks_node_last_block = client + .get_tenure_tip(&next_parent_tenure_id) + .inspect_err(|e| { + warn!( + "Failed to fetch last block in parent tenure from stacks-node"; + "parent_tenure_id" => %sortition_to_set.parent_tenure_id, + "err" => ?e, + ) + }) + .ok() + .map(|header| { + ( + header.height(), + StacksBlockId::new(&next_parent_tenure_id, &header.block_hash()), + ) + }); + let signerdb_last_block = SortitionsView::get_tenure_last_block_info( + &next_parent_tenure_id, + db, + proposal_config.tenure_last_block_proposal_timeout, + )? 
+ .map(|info| (info.block.header.chain_length, info.block.block_id())); + + let (parent_tenure_last_block_height, parent_tenure_last_block) = + match (stacks_node_last_block, signerdb_last_block) { + (Some(stacks_node_info), Some(signerdb_info)) => { + std::cmp::max_by_key(stacks_node_info, signerdb_info, |info| info.0) + } + (None, Some(signerdb_info)) => signerdb_info, + (Some(stacks_node_info), None) => stacks_node_info, + (None, None) => { + return Err(SignerChainstateError::NoParentTenureInfo( + next_parent_tenure_id, + )) + } + }; + + let miner_state = MinerState::ActiveMiner { + current_miner_pkh: next_current_miner_pkh, + parent_tenure_id: next_parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + }; + + Ok(miner_state) + } + + /// Handle a new bitcoin block arrival + pub fn bitcoin_block_arrival( + &mut self, + db: &SignerDb, + client: &StacksClient, + proposal_config: &ProposalEvalConfig, + mut expected_burn_height: Option, + ) -> Result<(), SignerChainstateError> { + // set self to uninitialized so that if this function errors, + // self is left as uninitialized. + let prior_state = std::mem::replace(self, Self::Uninitialized); + let prior_state_machine = match prior_state { + // if the local state machine was uninitialized, just initialize it + LocalStateMachine::Uninitialized => Self::place_holder(), + LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine, + LocalStateMachine::Pending { update, prior } => { + // This works as long as the pending updates are only burn blocks, + // but if we have other kinds of pending updates, this logic will need + // to be changed. + match update { + StateMachineUpdate::BurnBlock(pending_burn_height) => { + if pending_burn_height > expected_burn_height.unwrap_or(0) { + expected_burn_height = Some(pending_burn_height); + } + } + } + + prior + } + }; + + let peer_info = client.get_peer_info()?; + let next_burn_block_height = peer_info.burn_block_height; + let next_burn_block_hash = peer_info.pox_consensus; + + if let Some(expected_burn_height) = expected_burn_height { + if next_burn_block_height < expected_burn_height { + *self = Self::Pending { + update: StateMachineUpdate::BurnBlock(expected_burn_height), + prior: prior_state_machine, + }; + return Err(ClientError::InvalidResponse( + "Node has not processed the next burn block yet".into(), + ) + .into()); + } + } + + let CurrentAndLastSortition { + current_sortition, + last_sortition, + } = client.get_current_and_last_sortition()?; + + let cur_sortition = SortitionState::try_from(current_sortition)?; + let last_sortition = last_sortition + .map(SortitionState::try_from) + .transpose() + .ok() + .flatten() + .ok_or_else(|| { + ClientError::InvalidResponse( + "Fetching latest and last sortitions failed to return both sortitions".into(), + ) + })?; + + let standin_block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 0, + chain_length: 0, + burn_spent: 0, + consensus_hash: cur_sortition.consensus_hash.clone(), + parent_block_id: StacksBlockId::first_mined(), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 0, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }; + let is_current_valid = SortitionsView::check_parent_tenure_choice( + &cur_sortition, + &standin_block, + &db, + &client, + &proposal_config.first_proposal_burn_block_timing, + )?; + + let miner_state = if is_current_valid { + 
Self::make_miner_state(cur_sortition, client, db, proposal_config)? + } else { + let is_last_valid = SortitionsView::check_parent_tenure_choice( + &last_sortition, + &standin_block, + &db, + &client, + &proposal_config.first_proposal_burn_block_timing, + )?; + + if is_last_valid { + Self::make_miner_state(cur_sortition, client, db, proposal_config)? + } else { + warn!("Neither the current nor the prior sortition winner is considered a valid tenure"); + MinerState::NoValidMiner + } + }; + + // Note: we do this at the end so that the transform isn't fallible. + // we should come up with a better scheme here. + *self = Self::Initialized(SignerStateMachine { + burn_block: next_burn_block_hash, + burn_block_height: next_burn_block_height, + current_miner: miner_state, + active_signer_protocol_version: prior_state_machine.active_signer_protocol_version, + }); + + Ok(()) + } +} diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 185335ce1ed..f2562536fef 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -35,7 +35,7 @@ use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, In use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; -use stacks::types::chainstate::{StacksAddress, StacksPublicKey}; +use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPublicKey}; use stacks::types::PrivateKey; use stacks::util::get_epoch_time_secs; use stacks::util::hash::MerkleHashFunc; @@ -47,9 +47,11 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; +use stacks_signer::v0::signer_state::{LocalStateMachine, MinerState}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; +use super::neon_integrations::get_sortition_info_ch; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; @@ -293,6 +295,294 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest= info_cur.burn_block_height + })) + + }) + .expect("Timed out while waiting to fetch local state machines from the signer set"); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + assert_eq!( + sortition_latest.last_sortition_ch, + sortition_latest.stacks_parent_ch + ); + let latest_block = self + .stacks_client + .get_tenure_tip(&sortition_prior.consensus_hash) + .unwrap(); + let latest_block_id = + StacksBlockId::new(&sortition_prior.consensus_hash, &latest_block.block_hash()); + + states + .into_iter() + .enumerate() + .for_each(|(ix, signer_state)| { + let state_machine = signer_state + .signer_state_machines + .into_iter() + .find_map(|(rc, state)| if current_rc % 2 == rc { Some(state) } else { None }) + .expect( + "BUG: should be able to find signer state machine at the current reward cycle", + ) + .expect("BUG: signer state machine should exist at the current reward cycle"); + + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was 
not initialized"); + panic!(); + }; + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { current_miner_pkh, parent_tenure_id, parent_tenure_last_block, parent_tenure_last_block_height } = + state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160); + assert_eq!(parent_tenure_id, sortition_prior.consensus_hash); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Having a valid sortition + /// 2. The active miner is the winner of that sortition + /// 3. The active miner is building off of the prior tenure + pub fn check_signer_states_reorg( + &mut self, + accepting_reorg: &[StacksPublicKey], + rejecting_reorg: &[StacksPublicKey], + ) { + let info_cur = self.get_peer_info(); + let current_rc = self.get_current_reward_cycle(); + let mut states = Vec::with_capacity(0); + let accepting_reorg: Vec<_> = accepting_reorg + .iter() + .map(|pk| { + self.signer_stacks_private_keys + .iter() + .position(|sk| &StacksPublicKey::from_private(&sk) == pk) + .unwrap() + }) + .collect(); + let rejecting_reorg: Vec<_> = rejecting_reorg + .iter() + .map(|pk| { + self.signer_stacks_private_keys + .iter() + .position(|sk| &StacksPublicKey::from_private(&sk) == pk) + .unwrap() + }) + .collect(); + + wait_for(120, || { + states = self.get_all_states(); + Ok(states.iter().enumerate().all(|(ix, signer_state)| { + let state_machine = signer_state + .signer_state_machines + .iter() + .find_map(|(rc, state)| { + if current_rc % 2 == *rc { + Some(state.as_ref()) + } else { + None + } + }) + .expect( + "BUG: should be able to find signer state machine at the current reward cycle", + ) + .expect("BUG: signer state machine should exist at the current reward cycle"); + + let LocalStateMachine::Initialized(state_machine) = state_machine else { + warn!("Local state machine for signer #{ix} not initialized"); + return false; + }; + state_machine.burn_block_height >= info_cur.burn_block_height + })) + + }) + .expect("Timed out while waiting to fetch local state machines from the signer set"); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_parent = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.stacks_parent_ch.as_ref().unwrap(), + ); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + assert!(sortition_latest.last_sortition_ch != sortition_latest.stacks_parent_ch); + let latest_block = self + .stacks_client + .get_tenure_tip(&sortition_parent.consensus_hash) + .unwrap(); + let latest_block_id = + StacksBlockId::new(&sortition_parent.consensus_hash, &latest_block.block_hash()); + + info!("Sortition Latest: {sortition_latest:?}"); + info!("Sortition Parent: {sortition_parent:?}"); + info!("Sortition Prior: {sortition_prior:?}"); + + info!("Expected accepting: {accepting_reorg:?}"); + info!("Expected rejecting: {rejecting_reorg:?}"); + + 
states.iter().enumerate().for_each(|(ix, signer_state)| {
+            let state_machine = signer_state
+                .signer_state_machines
+                .iter()
+                .find_map(|(rc, state)| {
+                    if current_rc % 2 == *rc {
+                        Some(state.as_ref())
+                    } else {
+                        None
+                    }
+                })
+                .expect(
+                    "BUG: should be able to find signer state machine at the current reward cycle",
+                )
+                .expect("BUG: signer state machine should exist at the current reward cycle");
+
+            let LocalStateMachine::Initialized(state_machine) = state_machine else {
+                error!("Local state machine was not initialized");
+                panic!();
+            };
+
+            info!("Signer #{ix} has state machine: {state_machine:?}");
+        });
+
+        states
+            .into_iter()
+            .enumerate()
+            .for_each(|(ix, signer_state)| {
+                let state_machine = signer_state
+                    .signer_state_machines
+                    .into_iter()
+                    .find_map(|(rc, state)| if current_rc % 2 == rc { Some(state) } else { None })
+                    .expect(
+                        "BUG: should be able to find signer state machine at the current reward cycle",
+                    )
+                    .expect("BUG: signer state machine should exist at the current reward cycle");
+
+                let LocalStateMachine::Initialized(state_machine) = state_machine else {
+                    error!("Local state machine was not initialized");
+                    panic!();
+                };
+
+                info!("Signer #{ix} has state machine: {state_machine:?}");
+
+                assert_eq!(state_machine.burn_block, info_cur.pox_consensus,);
+                assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,);
+                let MinerState::ActiveMiner { current_miner_pkh, parent_tenure_id, parent_tenure_last_block, parent_tenure_last_block_height } =
+                    state_machine.current_miner
+                else {
+                    error!("State machine for Signer #{ix} did not have an active miner");
+                    panic!();
+                };
+                if accepting_reorg.contains(&ix) {
+                    assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160);
+                    assert_eq!(parent_tenure_id, sortition_parent.consensus_hash);
+                    assert_eq!(parent_tenure_last_block, latest_block_id);
+                    assert_eq!(parent_tenure_last_block_height, latest_block.height());
+                } else if rejecting_reorg.contains(&ix) {
+                    assert_eq!(Some(current_miner_pkh), sortition_prior.miner_pk_hash160);
+                } else {
+                    error!("Signer #{ix} was not supplied in either the approving or rejecting vectors");
+                    panic!();
+                }
+            });
+    }
+
+    /// Get status check results from each signer, blocking until every signer has responded
+    /// Returns the `StateInfo` for each signer, in order of `self.spawned_signers`
+    pub fn get_all_states(&mut self) -> Vec<StateInfo> {
+        let mut finished_signers = HashSet::new();
+        let mut output_states = Vec::new();
+        let mut sent_request = false;
+        wait_for(120, || {
+            if !sent_request {
+                // clear any stale states
+                if self
+                    .get_states(&finished_signers)
+                    .iter()
+                    .any(|s| s.is_some())
+                {
+                    info!("Had stale state responses, trying again to clear");
+                    return Ok(false);
+                }
+                self.send_status_request(&finished_signers);
+                sent_request = true;
+                thread::sleep(Duration::from_secs(1));
+            }
+
+            let latest_states = self.get_states(&finished_signers);
+            for (ix, state) in latest_states.into_iter().enumerate() {
+                let Some(state) = state else {
+                    continue;
+                };
+
+                finished_signers.insert(ix);
+                output_states.push((ix, state));
+            }
+            info!(
+                "Finished signers: {:?}",
+                finished_signers.iter().collect::<Vec<_>>()
+            );
+            Ok(finished_signers.len() == self.spawned_signers.len())
+        })
+        .expect("Timed out waiting for state responses from signer set");
+
+        output_states.sort_by_key(|(ix, _state)| *ix);
+        output_states
+            .into_iter()
+            .map(|(_ix, state)| state)
+            .collect()
+    }
+    /// Get status check results (if returned) from each signer without blocking
    /// Returns
Some() or None() for each signer, in order of `self.spawned_signers`
    pub fn get_states(&mut self, exclude: &HashSet<usize>) -> Vec<Option<StateInfo>> {
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index edfdd5f7de6..7a9f69aef5d 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -69,6 +69,7 @@ use stacks_signer::client::{SignerSlotID, StackerDB};
 use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network};
 use stacks_signer::signerdb::SignerDb;
 use stacks_signer::v0::signer::TEST_REPEAT_PROPOSAL_RESPONSE;
+use stacks_signer::v0::signer_state::LocalStateMachine;
 use stacks_signer::v0::tests::{
     TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL,
     TEST_SKIP_BLOCK_BROADCAST, TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION,
@@ -11996,6 +11997,8 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
         .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 30)
         .expect("Failed to mine BTC block followed by Block N");
     verify_sortition_winner(&sortdb, &miner_pkh_1);
+    miners.signer_test.check_signer_states_normal();
+
     let block_n =
         wait_for_block_pushed_by_miner_key(30, info_before.stacks_tip_height + 1, &miner_pk_1)
             .expect("Failed to get block N");
@@ -12021,6 +12024,7 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
         .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 30)
         .expect("Failed to mine BTC block");
     verify_sortition_winner(&sortdb, &miner_pkh_2);
+    miners.signer_test.check_signer_states_normal();
 
     info!("------------------------- Miner 1 Submits a Block Commit -------------------------");
     miners.submit_commit_miner_1(&sortdb);
@@ -12043,11 +12047,16 @@
     miners
         .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 30)
         .expect("Failed to mine BTC block");
+
     let block_n_1_prime = wait_for_block_proposal(30, block_n_height + 1, &miner_pk_1)
         .expect("Failed to get block proposal N+1'");
     // Stall the miner from proposing again until we're ready
     TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]);
+    miners
+        .signer_test
+        .check_signer_states_reorg(&approving_signers, &rejecting_signers);
+
     info!("------------------------- Wait for 3 acceptances and 2 rejections -------------------------");
     let signer_signature_hash = block_n_1_prime.header.signer_signature_hash();
     wait_for_block_acceptance_from_signers(30, &signer_signature_hash, &approving_signers)

From 3a25b2790229ce3b0c231cc6e7e14b339844a355 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Mon, 17 Mar 2025 13:24:56 -0500
Subject: [PATCH 141/238] fix bug in the reorg handling

---
 stacks-signer/src/v0/signer_state.rs        |  4 +--
 testnet/stacks-node/src/tests/signer/mod.rs | 31 ---------------------
 2 files changed, 1 insertion(+), 34 deletions(-)

diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
index 36702b10496..ba51d92850f 100644
--- a/stacks-signer/src/v0/signer_state.rs
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -13,8 +13,6 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
-use std::time::Duration; - use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use serde::{Deserialize, Serialize}; @@ -288,7 +286,7 @@ impl LocalStateMachine { )?; if is_last_valid { - Self::make_miner_state(cur_sortition, client, db, proposal_config)? + Self::make_miner_state(last_sortition, client, db, proposal_config)? } else { warn!("Neither the current nor the prior sortition winner is considered a valid tenure"); MinerState::NoValidMiner diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index f2562536fef..a9f27865660 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -464,37 +464,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Mon, 17 Mar 2025 14:52:06 -0400 Subject: [PATCH 142/238] fix: resolve issue with `GlobalFeeRate` strategy --- stackslib/src/core/mempool.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index b253bd901f6..971144de50e 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1874,6 +1874,15 @@ impl MemPoolDB { break MempoolIterationStopReason::IteratorExited; } } + + if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { + // Reset for finding the next transaction to process + debug!( + "Mempool: reset: retry list has {} entries", + candidate_cache.len() + ); + candidate_cache.reset(); + } }; // If we've reached the end of the mempool, or if we've stopped @@ -1883,7 +1892,7 @@ impl MemPoolDB { || !state_changed { if stop_reason == MempoolIterationStopReason::NoMoreCandidates { - info!("Mempool: no more transactions to consider"); + debug!("Mempool: no more transactions to consider"); } break stop_reason; } @@ -1892,15 +1901,6 @@ impl MemPoolDB { // query. let mut nonce_conn = self.reopen(true)?; nonce_cache.flush(&mut nonce_conn); - - if settings.strategy == MemPoolWalkStrategy::GlobalFeeRate { - // Reset for finding the next transaction to process - debug!( - "Mempool: reset: retry list has {} entries", - candidate_cache.len() - ); - candidate_cache.reset(); - } }; // drop these rusqlite statements and queries, since their existence as immutable borrows on the From c86ff5a219d3491c1639f4d379aaf74653254908 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Mar 2025 16:04:50 -0400 Subject: [PATCH 143/238] test: fix Bitcoin test exclusions --- .github/workflows/bitcoin-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 169ba7272a8..0548ec5851e 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -124,9 +124,9 @@ jobs: # Disable this flaky test. 
We don't need continue testing Epoch 2 -> 3 transition - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY # These mempool tests take a long time to run, and are meant to be run manually - - test-name: tests::nakamoto_integrations::large_mempool - - test-name: tests::nakamoto_integrations::large_mempool_random_fee - test-name: tests::nakamoto_integrations::larger_mempool + - test-name: tests::signer::v0::large_mempool + - test-name: tests::signer::v0::large_mempool_random_fee - test-name: tests::signer::v0::larger_mempool steps: From 37ab5e7a0f6a84028fdc97c8ac2e3be8613400ab Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:15:32 -0700 Subject: [PATCH 144/238] Add required check for release build --- .github/workflows/github-release.yml | 49 ++++++++++++++-------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 5da93df0a6d..68869b8829c 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -51,6 +51,7 @@ jobs: inputs.signer_tag != '' name: Build Binaries runs-on: ubuntu-latest + environment: "Build Release" strategy: ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch max-parallel: 10 @@ -85,29 +86,6 @@ jobs: signer_docker_tag: ${{ inputs.signer_docker_tag }} is_node_release: ${{ inputs.is_node_release }} - ## Runs when the following is true: - ## - either node or signer tag is provided - create-release: - if: | - inputs.node_tag != '' || - inputs.signer_tag != '' - name: Create Release - runs-on: ubuntu-latest - needs: - - build-binaries - steps: - ## Creates releases - - name: Create Release - uses: stacks-network/actions/stacks-core/release/create-releases@main - with: - node_tag: ${{ inputs.node_tag }} - node_docker_tag: ${{ inputs.node_docker_tag }} - signer_tag: ${{ inputs.signer_tag }} - signer_docker_tag: ${{ inputs.signer_docker_tag }} - is_node_release: ${{ inputs.is_node_release }} - is_signer_release: ${{ inputs.is_signer_release }} - GH_TOKEN: ${{ secrets.GH_TOKEN }} - ## Builds arch dependent Docker images from binaries ## ## Runs when the following is true: @@ -120,7 +98,6 @@ jobs: runs-on: ubuntu-latest needs: - build-binaries - - create-release strategy: fail-fast: false ## Build a maximum of 2 images concurrently based on matrix.dist @@ -143,6 +120,30 @@ jobs: DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} dist: ${{ matrix.dist }} + ## Runs when the following is true: + ## - either node or signer tag is provided + create-release: + if: | + inputs.node_tag != '' || + inputs.signer_tag != '' + name: Create Release + runs-on: ubuntu-latest + needs: + - build-binaries + - docker-image + steps: + ## Creates releases + - name: Create Release + uses: stacks-network/actions/stacks-core/release/create-releases@main + with: + node_tag: ${{ inputs.node_tag }} + node_docker_tag: ${{ inputs.node_docker_tag }} + signer_tag: ${{ inputs.signer_tag }} + signer_docker_tag: ${{ inputs.signer_docker_tag }} + is_node_release: ${{ inputs.is_node_release }} + is_signer_release: ${{ inputs.is_signer_release }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} + ## Create the downstream PR for the release branch to master,develop create-pr: if: | From 66f7b4ce2fda100eddff6f9e89fa194bbc422d91 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 17 Mar 2025 16:55:59 -0400 Subject: [PATCH 145/238] test: update mempool unit test for latest algorithm --- 
.../chainstate/stacks/tests/block_construction.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 472d71a2343..e5e4ced4e04 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5122,7 +5122,7 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { (2, 9, 3, 5, 1000.0), (2, 10, 3, 6, 1500.0), (3, 4, 3, 4, 100.0), - (4, 3, 5, 2, 500.0), + (4, 3, 5, 2, 550.0), (5, 0, 5, 0, 500.0), (5, 1, 5, 1, 500.0), (5, 3, 4, 4, 2000.0), @@ -5245,17 +5245,17 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { // Ignores old and very future nonces. let expected_tx_order = vec![ (address_2.clone(), 6, address_4.clone(), 1, 1000), - (address_2.clone(), 7, address_4.clone(), 2, 800), - (address_2.clone(), 8, address_2.clone(), 8, 1000), (address_5.clone(), 0, address_5.clone(), 0, 500), - (address_5.clone(), 1, address_5.clone(), 1, 500), - (address_4.clone(), 3, address_5.clone(), 2, 500), - (address_5.clone(), 3, address_4.clone(), 4, 2000), - (address_5.clone(), 4, address_4.clone(), 5, 2000), (address_0.clone(), 2, address_0.clone(), 2, 300), + (address_2.clone(), 7, address_4.clone(), 2, 800), + (address_5.clone(), 1, address_5.clone(), 1, 500), (address_0.clone(), 3, address_0.clone(), 3, 400), + (address_2.clone(), 8, address_2.clone(), 8, 1000), + (address_4.clone(), 3, address_5.clone(), 2, 550), (address_0.clone(), 4, address_3.clone(), 0, 500), + (address_5.clone(), 3, address_4.clone(), 4, 2000), (address_1.clone(), 1, address_3.clone(), 1, 600), + (address_5.clone(), 4, address_4.clone(), 5, 2000), (address_1.clone(), 2, address_3.clone(), 2, 700), (address_1.clone(), 3, address_3.clone(), 3, 800), (address_1.clone(), 4, address_1.clone(), 4, 1200), From c757dbe88ef13271a84241f90becf8d2a21f1d6f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 18 Mar 2025 13:36:05 -0400 Subject: [PATCH 146/238] test: refactor and clean up tests --- .github/workflows/bitcoin-tests.yml | 6 +- .../src/tests/nakamoto_integrations.rs | 480 ++---------------- testnet/stacks-node/src/tests/signer/v0.rs | 308 ++--------- 3 files changed, 98 insertions(+), 696 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 0548ec5851e..20238b7295c 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -124,9 +124,11 @@ jobs: # Disable this flaky test. 
We don't need continue testing Epoch 2 -> 3 transition - test-name: tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY # These mempool tests take a long time to run, and are meant to be run manually + - test-name: tests::nakamoto_integrations::large_mempool_original_constant_fee + - test-name: tests::nakamoto_integrations::large_mempool_original_random_fee + - test-name: tests::nakamoto_integrations::large_mempool_next_constant_fee + - test-name: tests::nakamoto_integrations::large_mempool_next_random_fee - test-name: tests::nakamoto_integrations::larger_mempool - - test-name: tests::signer::v0::large_mempool - - test-name: tests::signer::v0::large_mempool_random_fee - test-name: tests::signer::v0::larger_mempool steps: diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 1bc23ed955a..474c6491764 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11239,23 +11239,22 @@ fn reload_miner_config() { run_loop_thread.join().unwrap(); } -#[test] -#[ignore] -/// This test intends to check the timing of the mempool iteration when there +/// This function intends to check the timing of the mempool iteration when there /// are a large number of transactions in the mempool. It will boot to epoch 3, /// fan out some STX transfers to a large number of accounts, wait for these to /// all be mined, and then pause block mining, and submit a large number of /// transactions to the mempool. It will then unpause block mining and check /// how long it takes for the miner to mine the first block, and how long it -/// takes to empty the mempool. -fn large_mempool() { +/// takes to empty the mempool. Several tests below call this function, testing +/// different strategies and fees. +fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + naka_conf.miner.mempool_walk_strategy = strategy; let sender_signer_sk = Secp256k1PrivateKey::random(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -11298,7 +11297,7 @@ fn large_mempool() { .collect::>(); test_observer::spawn(); - test_observer::register_any(&mut naka_conf); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); let mempool_db_path = format!( "{}/nakamoto-neon/chainstate/mempool.sqlite", @@ -11315,9 +11314,7 @@ fn large_mempool() { let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); let run_loop_stopper = run_loop.get_termination_switch(); let Counters { - blocks_processed, - naka_proposed_blocks, - .. + blocks_processed, .. 
} = run_loop.counters(); let counters = run_loop.counters(); @@ -11462,7 +11459,7 @@ fn large_mempool() { // Add the new senders to the list of senders senders.extend(new_senders.iter().map(|sk| (sk, 0))); - info!("Pause mining and fill the mempool with the first round of transfers"); + info!("Pause mining and fill the mempool with the transfers"); // Pause block mining TEST_MINE_STALL.set(true); @@ -11470,90 +11467,15 @@ fn large_mempool() { let db_tx = conn.transaction().unwrap(); let timer = Instant::now(); - // Fill the mempool with the first round of transfers - for _ in 0..25 { - for (sender_sk, nonce) in senders.iter_mut() { - let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - transfer_fee, - naka_conf.burnchain.chain_id, - &recipient, - 1, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - } - } - db_tx.commit().unwrap(); - - info!( - "Sending first round of transfers took {:?}", - timer.elapsed() - ); - - let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); - - info!("Mining first round of transfers"); - - let timer = Instant::now(); - - // Unpause block mining - TEST_MINE_STALL.set(false); - - // Wait for the first block to be proposed. - wait_for(60, || { - let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); - Ok(blocks_proposed > blocks_proposed_before) - }) - .expect("Timed out waiting for first block to be mined"); - - info!( - "Mining first block of first round of transfers took {:?}", - timer.elapsed() - ); - - // Wait for the first round of transfers to all be mined - wait_for(1200, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for first round of transfers to be mined"); - - info!("Mining first round of transfers took {:?}", timer.elapsed()); - - info!("Pause mining and fill the mempool with the second round of transfers"); - - // Pause block mining - TEST_MINE_STALL.set(true); - - let db_tx = conn.transaction().unwrap(); - let timer = Instant::now(); - - // Fill the mempool with the second round of transfers + // Fill the mempool with the transfers for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); + let fee = set_fee(); let transfer_tx = make_stacks_transfer( sender_sk, *nonce, - transfer_fee, + fee, naka_conf.burnchain.chain_id, &recipient, 1, @@ -11563,7 +11485,7 @@ fn large_mempool() { transfer_tx, &sender_addr, *nonce, - transfer_fee, + fee, &tip.consensus_hash, &tip.canonical_stacks_tip_hash, tip.stacks_block_height, @@ -11573,32 +11495,31 @@ fn large_mempool() { } db_tx.commit().unwrap(); - info!( - "Sending second round of transfers took {:?}", - timer.elapsed() - ); + info!("Sending transfers took {:?}", timer.elapsed()); - let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); - info!("Mining second round of transfers"); - let timer = Instant::now(); + info!("Mining transfers"); + let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); // Unpause block mining TEST_MINE_STALL.set(false); // Wait for the first block to be proposed. 
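+    // NOTE: the mined-block events reported by the test observer are used as a
+    // stand-in for a direct proposal counter here.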
- wait_for(60, || { - let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); - Ok(blocks_proposed > blocks_proposed_before) + wait_for(30, || { + let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); + Ok(proposed_blocks > proposed_blocks_before) }) .expect("Timed out waiting for first block to be mined"); + let blocks = test_observer::get_mined_nakamoto_blocks(); + let last_block = blocks.last().unwrap(); info!( - "Mining first block of second round of transfers took {:?}", - timer.elapsed() + "First block contains {} transactions", + last_block.tx_events.len() ); + assert!(last_block.tx_events.len() > 5000); - // Wait for the second round of transfers to all be mined - wait_for(600, || { + // Wait for the transfers to all be mined + wait_for(7200, || { for (sender_sk, nonce) in senders.iter() { let sender_addr = tests::to_addr(sender_sk); let account = get_account(&http_origin, &sender_addr); @@ -11608,12 +11529,9 @@ fn large_mempool() { } Ok(true) }) - .expect("Timed out waiting for first round of transfers to be mined"); + .expect("Timed out waiting for transfers to be mined"); - info!( - "Mining second round of transfers took {:?}", - timer.elapsed() - ); + info!("Mining transfers took {:?}", timer.elapsed()); coord_channel .lock() @@ -11626,312 +11544,30 @@ fn large_mempool() { #[test] #[ignore] -/// This test intends to check the timing of the mempool iteration when there -/// are a large number of transactions in the mempool. It will boot to epoch 3, -/// fan out some STX transfers to a large number of accounts, wait for these to -/// all be mined, and then pause block mining, and submit a large number of -/// transactions to the mempool from those accounts with random fees between -/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause -/// block mining and check how long it takes for the miner to mine the first -/// block, and how long it takes to empty the mempool. -fn large_mempool_random_fee() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; - - let sender_signer_sk = Secp256k1PrivateKey::random(); - let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk]); - naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); - let stacker_sk = setup_stacker(&mut naka_conf); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - - let transfer_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - - // Start with 10 accounts with initial balances. - let initial_sender_sks = (0..10) - .map(|_| StacksPrivateKey::random()) - .collect::>(); - let initial_sender_addrs = initial_sender_sks - .iter() - .map(|sk| tests::to_addr(sk)) - .collect::>(); - - // These 10 accounts will send to 25 accounts each, then those 260 accounts - // will send to 25 accounts each, for a total of 6760 accounts. - // At the end of the funding round, we want to have 6760 accounts with - // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends. - // With a fee of 180 - 2000 uSTX per send, we need each account to end up - // with 2001 * 25 = 50_025 uSTX. - // The 260 accounts in the middle will need to have - // (50025 + 180) * 26 = 1_305_330 uSTX. 
- // The 10 initial accounts will need to have - // (1305330 + 180) * 26 = 33_943_260 uSTX. - let initial_balance = 33_943_260; - for addr in initial_sender_addrs.iter() { - naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); - } - // This will hold tuples for all of our senders, with the sender pk and - // the nonce - let mut senders = initial_sender_sks - .iter() - .map(|sk| (sk, 0)) - .collect::>(); - - test_observer::spawn(); - test_observer::register_any(&mut naka_conf); - - let mempool_db_path = format!( - "{}/nakamoto-neon/chainstate/mempool.sqlite", - naka_conf.node.working_dir - ); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - btc_regtest_controller.bootstrap_chain(201); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_proposed_blocks, - .. - } = run_loop.counters(); - let counters = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - - let run_loop_thread = thread::Builder::new() - .name("run_loop".into()) - .spawn(move || run_loop.start(None, 0)) - .unwrap(); - wait_for_runloop(&blocks_processed); - boot_to_epoch_3( - &naka_conf, - &blocks_processed, - &[stacker_sk], - &[sender_signer_sk], - &mut Some(&mut signers), - &mut btc_regtest_controller, - ); - - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); - blind_signer(&naka_conf, &signers, &counters); - - next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); - - let burnchain = naka_conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - // Open a sqlite DB at mempool_db_path so that we can quickly add - // transactions to the mempool. 
- let mut conn = Connection::open(&mempool_db_path).unwrap(); - let db_tx = conn.transaction().unwrap(); - - info!("Sending the first round of funding"); - let timer = Instant::now(); - let mut new_senders = vec![]; - for (sender_sk, nonce) in senders.iter_mut() { - for _ in 0..25 { - let recipient_sk = StacksPrivateKey::random(); - let recipient_addr = tests::to_addr(&recipient_sk); - let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - transfer_fee, - naka_conf.burnchain.chain_id, - &recipient_addr.into(), - 1_305_330, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - new_senders.push(recipient_sk); - } - } - db_tx.commit().unwrap(); - - info!("Sending first round of funding took {:?}", timer.elapsed()); - - // Wait for the first round of funding to be mined - wait_for(120, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for first round of funding to be mined"); - - info!( - "Sending and mining first round of funding took {:?}", - timer.elapsed() - ); - - // Add the new senders to the list of senders - senders.extend(new_senders.iter().map(|sk| (sk, 0))); - - info!("Sending the second round of funding"); - let db_tx = conn.transaction().unwrap(); - let timer = Instant::now(); - let mut new_senders = vec![]; - for (sender_sk, nonce) in senders.iter_mut() { - for _ in 0..25 { - let sender_addr = tests::to_addr(sender_sk); - let recipient_sk = StacksPrivateKey::random(); - let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - transfer_fee, - naka_conf.burnchain.chain_id, - &recipient_addr.into(), - 50_025, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - new_senders.push(recipient_sk); - } - } - db_tx.commit().unwrap(); - - info!("Sending second round of funding took {:?}", timer.elapsed()); - - // Wait for the second round of funding to be mined - wait_for(120, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for second round of funding to be mined"); - - info!( - "Sending and mining second round of funding took {:?}", - timer.elapsed() - ); - - // Add the new senders to the list of senders - senders.extend(new_senders.iter().map(|sk| (sk, 0))); - - info!("Pause mining and fill the mempool with the first round of transfers"); - - // Pause block mining - TEST_MINE_STALL.set(true); - - let timer = Instant::now(); - - // Fill the mempool with the transfers - let db_tx = conn.transaction().unwrap(); - for _ in 0..25 { - for (sender_sk, nonce) in senders.iter_mut() { - let sender_addr = tests::to_addr(sender_sk); - let fee = thread_rng().gen_range(180..2000); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - fee, - naka_conf.burnchain.chain_id, - &recipient, - 1, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - fee, 
- &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - } - } - db_tx.commit().unwrap(); - - info!( - "Sending first round of transfers took {:?}", - timer.elapsed() - ); - - let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); - - info!("Mining first round of transfers"); - - let timer = Instant::now(); - - // Unpause block mining - TEST_MINE_STALL.set(false); - - // Wait for the first block to be proposed. - wait_for(10, || { - let blocks_proposed = naka_proposed_blocks.load(Ordering::SeqCst); - Ok(blocks_proposed > blocks_proposed_before) - }) - .expect("Timed out waiting for first block to be mined"); - - info!( - "Mining first block of first round of transfers took {:?}", - timer.elapsed() - ); - - // Wait for the first round of transfers to all be mined - wait_for(3600, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for first round of transfers to be mined"); +fn large_mempool_original_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180); +} - info!("Mining first round of transfers took {:?}", timer.elapsed()); +#[test] +#[ignore] +fn large_mempool_original_random_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} - coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper.store(false, Ordering::SeqCst); +#[test] +#[ignore] +fn large_mempool_next_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180); +} - run_loop_thread.join().unwrap(); +#[test] +#[ignore] +fn large_mempool_next_random_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || { + thread_rng().gen_range(180..2000) + }); } #[test] @@ -12158,7 +11794,7 @@ fn larger_mempool() { // Add the new senders to the list of senders senders.extend(new_senders.iter().map(|sk| (sk, 0))); - info!("Pause mining and fill the mempool with the first round of transfers"); + info!("Pause mining and fill the mempool with the transfers"); // Pause block mining TEST_MINE_STALL.set(true); @@ -12196,14 +11832,11 @@ fn larger_mempool() { db_tx.commit().unwrap(); } - info!( - "Sending first round of transfers took {:?}", - timer.elapsed() - ); + info!("Sending transfers took {:?}", timer.elapsed()); let blocks_proposed_before = naka_proposed_blocks.load(Ordering::SeqCst); - info!("Mining first round of transfers"); + info!("Mining transfers"); let timer = Instant::now(); @@ -12217,12 +11850,9 @@ fn larger_mempool() { }) .expect("Timed out waiting for first block to be mined"); - info!( - "Mining first block of first round of transfers took {:?}", - timer.elapsed() - ); + info!("Mining first block of transfers took {:?}", timer.elapsed()); - // Wait for the first round of transfers to all be mined + // Wait for the transfers to all be mined wait_for(7200, || { for (sender_sk, nonce) in senders.iter() { let sender_addr = tests::to_addr(sender_sk); @@ -12233,9 +11863,9 @@ fn larger_mempool() { } Ok(true) }) - .expect("Timed out waiting for first round of transfers to be mined"); + .expect("Timed out waiting for transfers to be mined"); - info!("Mining first round of transfers took {:?}", timer.elapsed()); + info!("Mining transfers took {:?}", timer.elapsed()); coord_channel 
.lock() diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 322b3a00e53..5250107c36c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -29,6 +29,7 @@ use libsigner::v0::messages::{ use libsigner::{ BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING, }; +use rand::{thread_rng, Rng}; use rusqlite::Connection; use stacks::address::AddressHashMode; use stacks::burnchains::Txid; @@ -12442,20 +12443,19 @@ fn signer_can_accept_rejected_block() { signer_test.shutdown(); } -#[test] -#[ignore] -/// This test intends to check the timing of the mempool iteration when there -/// are a large number of transactions in the mempool. It will boot to epoch 3, -/// fan out some STX transfers to a large number of accounts, wait for these to -/// all be mined, and then pause block mining, and submit a large number of -/// transactions to the mempool from those accounts, all with the same fee. It -/// will then unpause block mining and wait for the first block to be mined. -/// Since the default miner configuration specifies to spend 5 seconds mining a -/// block, we expect that this first block should be proposed within 10 seconds -/// and approved within 20 seconds. We also verify that the block contains at -/// least 5,000 transactions, since a lower count than that would indicate a -/// clear regression. -fn large_mempool() { +/// This function intends to check the timing of the mempool iteration when +/// there are a large number of transactions in the mempool. It will boot to +/// epoch 3, fan out some STX transfers to a large number of accounts, wait for +/// these to all be mined, and then pause block mining, and submit a large +/// number of transactions to the mempool. It will then unpause block mining +/// and wait for the first block to be mined. Since the default miner +/// configuration specifies to spend 5 seconds mining a block, we expect that +/// this first block should be proposed within 10 seconds and approved within +/// 20 seconds. We also verify that the block contains at least 5,000 +/// transactions, since a lower count than that would indicate a clear +/// regression. Several tests below call this function, testing different +/// strategies and fees. +fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -12495,7 +12495,7 @@ fn large_mempool() { |_| {}, |conf| { conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate; + conf.miner.mempool_walk_strategy = strategy; }, None, None, @@ -12647,14 +12647,14 @@ fn large_mempool() { for _ in 0..25 { for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = - make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); + let fee = set_fee(); + let transfer_tx = make_stacks_transfer(sender_sk, *nonce, fee, chain_id, &recipient, 1); insert_tx_in_mempool( &db_tx, transfer_tx, &sender_addr, *nonce, - transfer_fee, + fee, &tip.consensus_hash, &tip.canonical_stacks_tip_hash, tip.stacks_block_height, @@ -12701,260 +12701,30 @@ fn large_mempool() { #[test] #[ignore] -/// This test intends to check the timing of the mempool iteration when there -/// are a large number of transactions in the mempool. 
It will boot to epoch 3,
-/// fan out some STX transfers to a large number of accounts, wait for these to
-/// all be mined, and then pause block mining, and submit a large number of
-/// transactions to the mempool from those accounts with random fees between
-/// the minimum allowed fee of 180 uSTX and 2000 uSTX. It will then unpause
-/// block mining and wait for the first block to be mined. Since the default
-/// miner configuration specifies to spend 5 seconds mining a block, we expect
-/// that this first block should be proposed within 10 seconds and approved
-/// within 20 seconds. We also verify that the block contains at least 5,000
-/// transactions, since a lower count than that would indicate a clear
-/// regression.
-fn large_mempool_random_fee() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let transfer_fee = 180;
-    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-
-    // Start with 10 accounts with initial balances.
-    let initial_sender_sks = (0..10)
-        .map(|_| StacksPrivateKey::random())
-        .collect::<Vec<_>>();
-    let initial_sender_addrs = initial_sender_sks
-        .iter()
-        .map(|sk| tests::to_addr(sk))
-        .collect::<Vec<_>>();
-
-    // These 10 accounts will send to 25 accounts each, then those 260 accounts
-    // will send to 25 accounts each, for a total of 6760 accounts.
-    // At the end of the funding round, we want to have 6760 accounts with
-    // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends.
-    // With a fee of 180 - 2000 uSTX per send, we need each account to end up
-    // with 2001 * 25 * 10 = 500_250 uSTX.
-    // The 260 accounts in the middle will need to have
-    // (500250 + 180) * 26 = 13_011_180 uSTX.
-    // The 10 initial accounts will need to have
-    // (13011180 + 180) * 26 = 338_295_360 uSTX.
-    let initial_balance = 338_295_360;
-    let initial_balances = initial_sender_addrs
-        .iter()
-        .map(|addr| (addr.clone(), initial_balance))
-        .collect::<Vec<_>>();
-
-    let num_signers = 5;
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
-        num_signers,
-        initial_balances,
-        |_| {},
-        |conf| {
-            conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
-            conf.miner.mempool_walk_strategy = MemPoolWalkStrategy::NextNonceWithHighestFeeRate;
-        },
-        None,
-        None,
-    );
-    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
-    signer_test.boot_to_epoch_3();
-
-    // This will hold tuples for all of our senders, with the sender pk and
-    // the nonce
-    let mut senders = initial_sender_sks
-        .iter()
-        .map(|sk| (sk, 0))
-        .collect::<Vec<_>>();
-
-    let mempool_db_path = format!(
-        "{}/nakamoto-neon/chainstate/mempool.sqlite",
-        signer_test.running_nodes.conf.node.working_dir
-    );
-    let chain_id = signer_test.running_nodes.conf.burnchain.chain_id;
-    let burnchain = signer_test.running_nodes.conf.get_burnchain();
-    let sortdb = burnchain.open_sortition_db(true).unwrap();
-    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-
-    // Open a sqlite DB at mempool_db_path so that we can quickly add
-    // transactions to the mempool.
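// Aside: a minimal sketch, not from the patch series, that mechanically checks
// the funding arithmetic in the balance comments above. Each transfer costs at
// most a 2000 uSTX fee plus 1 uSTX sent, and each funder provisions 26 shares
// (its 25 recipients plus itself) with the 180 uSTX funding fee folded into
// every share. Names here are illustrative only.
#[test]
fn funding_fanout_math_sketch() {
    // worst-case cost of a single transfer: 2000 uSTX fee + 1 uSTX sent
    let leaf_balance: u64 = 2001 * 25 * 10;
    assert_eq!(leaf_balance, 500_250);
    // middle-tier accounts fund 25 leaf accounts plus themselves (26 shares)
    let middle_balance: u64 = (leaf_balance + 180) * 26;
    assert_eq!(middle_balance, 13_011_180);
    // the 10 initial accounts fund 25 middle-tier accounts plus themselves
    let initial_balance: u64 = (middle_balance + 180) * 26;
    assert_eq!(initial_balance, 338_295_360);
}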
- let mut conn = Connection::open(&mempool_db_path).unwrap(); - let db_tx = conn.transaction().unwrap(); - - info!("Sending the first round of funding"); - let timer = Instant::now(); - let mut new_senders = vec![]; - for (sender_sk, nonce) in senders.iter_mut() { - for _ in 0..25 { - let recipient_sk = StacksPrivateKey::random(); - let recipient_addr = tests::to_addr(&recipient_sk); - let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - transfer_fee, - chain_id, - &recipient_addr.into(), - 13_011_180, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - new_senders.push(recipient_sk); - } - } - db_tx.commit().unwrap(); - - info!("Sending first round of funding took {:?}", timer.elapsed()); - - // Wait for the first round of funding to be mined - wait_for(120, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for first round of funding to be mined"); - - info!( - "Sending and mining first round of funding took {:?}", - timer.elapsed() - ); - - // Add the new senders to the list of senders - senders.extend(new_senders.iter().map(|sk| (sk, 0))); - - info!("Sending the second round of funding"); - let db_tx = conn.transaction().unwrap(); - let timer = Instant::now(); - let mut new_senders = vec![]; - for (sender_sk, nonce) in senders.iter_mut() { - for _ in 0..25 { - let sender_addr = tests::to_addr(sender_sk); - let recipient_sk = StacksPrivateKey::random(); - let recipient_addr = tests::to_addr(&recipient_sk); - let transfer_tx = make_stacks_transfer( - sender_sk, - *nonce, - transfer_fee, - chain_id, - &recipient_addr.into(), - 500_250, - ); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 1; - new_senders.push(recipient_sk); - } - } - db_tx.commit().unwrap(); - - info!("Sending second round of funding took {:?}", timer.elapsed()); - - // Wait for the second round of funding to be mined - wait_for(120, || { - for (sender_sk, nonce) in senders.iter() { - let sender_addr = tests::to_addr(sender_sk); - let account = get_account(&http_origin, &sender_addr); - if account.nonce < *nonce { - return Ok(false); - } - } - Ok(true) - }) - .expect("Timed out waiting for second round of funding to be mined"); - - info!( - "Sending and mining second round of funding took {:?}", - timer.elapsed() - ); - - // Add the new senders to the list of senders - senders.extend(new_senders.iter().map(|sk| (sk, 0))); - - info!("Pause mining and fill the mempool with the transfers"); - - // Pause block mining - TEST_MINE_STALL.set(true); - - let db_tx = conn.transaction().unwrap(); - let timer = Instant::now(); - - // Fill the mempool with the first round of transfers - for _ in 0..25 { - for (sender_sk, nonce) in senders.iter_mut() { - let sender_addr = tests::to_addr(sender_sk); - let transfer_tx = - make_stacks_transfer(sender_sk, *nonce, transfer_fee, chain_id, &recipient, 1); - insert_tx_in_mempool( - &db_tx, - transfer_tx, - &sender_addr, - *nonce, - transfer_fee, - &tip.consensus_hash, - &tip.canonical_stacks_tip_hash, - tip.stacks_block_height, - ); - *nonce += 
1; - } - } - db_tx.commit().unwrap(); - - info!("Sending transfers took {:?}", timer.elapsed()); - - let proposed_blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - let blocks_before = test_observer::get_blocks().len(); - - info!("Mining transfers..."); - - // Unpause block mining - TEST_MINE_STALL.set(false); - - // Wait for the first block to be proposed. - wait_for(30, || { - let proposed_blocks = test_observer::get_mined_nakamoto_blocks().len(); - Ok(proposed_blocks > proposed_blocks_before) - }) - .expect("Timed out waiting for first block to be mined"); +fn large_mempool_original_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || 180); +} - let blocks = test_observer::get_mined_nakamoto_blocks(); - let last_block = blocks.last().unwrap(); - info!( - "First block contains {} transactions", - last_block.tx_events.len() - ); - assert!(last_block.tx_events.len() > 5000); +#[test] +#[ignore] +fn large_mempool_original_random_fee() { + large_mempool_base(MemPoolWalkStrategy::GlobalFeeRate, || { + thread_rng().gen_range(180..2000) + }); +} - // Wait for the first block to be accepted. - wait_for(20, || { - let blocks = test_observer::get_blocks().len(); - Ok(blocks > blocks_before) - }) - .expect("Timed out waiting for first block to be mined"); +#[test] +#[ignore] +fn large_mempool_next_constant_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || 180); +} - signer_test.shutdown(); +#[test] +#[ignore] +fn large_mempool_next_random_fee() { + large_mempool_base(MemPoolWalkStrategy::NextNonceWithHighestFeeRate, || { + thread_rng().gen_range(180..2000) + }); } #[test] From ab22c32bc5b7d3b02967fd586fd3c6c3d20d891a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 18 Mar 2025 13:41:28 -0400 Subject: [PATCH 147/238] test: only assert transaction count for new strategy --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 +++- testnet/stacks-node/src/tests/signer/v0.rs | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 474c6491764..30154d4839e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11516,7 +11516,9 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) "First block contains {} transactions", last_block.tx_events.len() ); - assert!(last_block.tx_events.len() > 5000); + if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate { + assert!(last_block.tx_events.len() > 5000); + } // Wait for the transfers to all be mined wait_for(7200, || { diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5250107c36c..da08e40884a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12687,7 +12687,9 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) "First block contains {} transactions", last_block.tx_events.len() ); - assert!(last_block.tx_events.len() > 5000); + if strategy == MemPoolWalkStrategy::NextNonceWithHighestFeeRate { + assert!(last_block.tx_events.len() > 5000); + } // Wait for the first block to be accepted. 
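// Aside: a minimal sketch, not from patch 147 itself, restating what the gated
// assertion above encodes: the 5,000-transaction lower bound on the first block
// is only expected to hold under the new `NextNonceWithHighestFeeRate` walk
// strategy, so no bound is asserted for `GlobalFeeRate`. The helper name is
// hypothetical; only the two `MemPoolWalkStrategy` variants are assumed.
fn min_expected_first_block_txs(strategy: MemPoolWalkStrategy) -> Option<usize> {
    match strategy {
        // the nonce-aware strategy should fill the first block
        MemPoolWalkStrategy::NextNonceWithHighestFeeRate => Some(5000),
        // no lower bound is asserted for the original strategy
        MemPoolWalkStrategy::GlobalFeeRate => None,
    }
}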
wait_for(20, || { From 67cc828abda600dc9aa6ab8cdf3cdbcff089dbba Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 18 Mar 2025 14:10:27 -0400 Subject: [PATCH 148/238] chore: clarify order in test --- .../stacks/tests/block_construction.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index e5e4ced4e04..67980fbfd05 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5244,24 +5244,24 @@ fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { // Expected transaction consideration order, sorted by mineable first (next origin+sponsor nonces, highest fee). // Ignores old and very future nonces. let expected_tx_order = vec![ - (address_2.clone(), 6, address_4.clone(), 1, 1000), + (address_2.clone(), 6, address_4.clone(), 1, 1000), // Round 1 (address_5.clone(), 0, address_5.clone(), 0, 500), (address_0.clone(), 2, address_0.clone(), 2, 300), - (address_2.clone(), 7, address_4.clone(), 2, 800), + (address_2.clone(), 7, address_4.clone(), 2, 800), // Round 2 (address_5.clone(), 1, address_5.clone(), 1, 500), (address_0.clone(), 3, address_0.clone(), 3, 400), - (address_2.clone(), 8, address_2.clone(), 8, 1000), + (address_2.clone(), 8, address_2.clone(), 8, 1000), // Round 3 (address_4.clone(), 3, address_5.clone(), 2, 550), (address_0.clone(), 4, address_3.clone(), 0, 500), - (address_5.clone(), 3, address_4.clone(), 4, 2000), + (address_5.clone(), 3, address_4.clone(), 4, 2000), // Round 4 (address_1.clone(), 1, address_3.clone(), 1, 600), - (address_5.clone(), 4, address_4.clone(), 5, 2000), + (address_5.clone(), 4, address_4.clone(), 5, 2000), // Round 5 (address_1.clone(), 2, address_3.clone(), 2, 700), - (address_1.clone(), 3, address_3.clone(), 3, 800), - (address_1.clone(), 4, address_1.clone(), 4, 1200), + (address_1.clone(), 3, address_3.clone(), 3, 800), // Round 6 + (address_1.clone(), 4, address_1.clone(), 4, 1200), // Round 7 (address_3.clone(), 4, address_3.clone(), 4, 100), - (address_2.clone(), 9, address_3.clone(), 5, 1000), - (address_2.clone(), 10, address_3.clone(), 6, 1500), + (address_2.clone(), 9, address_3.clone(), 5, 1000), // Round 8 + (address_2.clone(), 10, address_3.clone(), 6, 1500), // Round 9 ]; assert_eq!( considered_txs, expected_tx_order, From efb748039815503af404f5d1619ac75e114438c9 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:58:06 -0700 Subject: [PATCH 149/238] Remove armv7 build and restore order of operations in release workflow --- .github/workflows/github-release.yml | 51 ++++++++++++++-------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 68869b8829c..75155649b6e 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -63,7 +63,6 @@ jobs: - windows cpu: - arm64 - - armv7 - x86-64 ## defaults to x86-64-v3 variant - intel haswell (2013) and newer # - x86-64-v2 ## intel nehalem (2008) and newer # - x86-64-v3 ## intel haswell (2013) and newer @@ -86,29 +85,20 @@ jobs: signer_docker_tag: ${{ inputs.signer_docker_tag }} is_node_release: ${{ inputs.is_node_release }} - ## Builds arch dependent Docker images from binaries - ## ## Runs when the following is true: ## - either node or signer tag is 
provided - docker-image: + create-release: if: | inputs.node_tag != '' || inputs.signer_tag != '' - name: Docker Image (Binary) + name: Create Release runs-on: ubuntu-latest needs: - build-binaries - strategy: - fail-fast: false - ## Build a maximum of 2 images concurrently based on matrix.dist - max-parallel: 2 - matrix: - dist: - - alpine - - debian steps: - - name: Create Docker Image - uses: stacks-network/actions/stacks-core/release/docker-images@main + ## Creates releases + - name: Create Release + uses: stacks-network/actions/stacks-core/release/create-releases@main with: node_tag: ${{ inputs.node_tag }} node_docker_tag: ${{ inputs.node_docker_tag }} @@ -116,25 +106,34 @@ jobs: signer_docker_tag: ${{ inputs.signer_docker_tag }} is_node_release: ${{ inputs.is_node_release }} is_signer_release: ${{ inputs.is_signer_release }} - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} - dist: ${{ matrix.dist }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} + + ## Builds arch dependent Docker images from binaries + ## + ## Note: this step requires the binaries in the create-release step to be uploaded ## Runs when the following is true: ## - either node or signer tag is provided - create-release: + docker-image: if: | inputs.node_tag != '' || inputs.signer_tag != '' - name: Create Release + name: Docker Image (Binary) runs-on: ubuntu-latest needs: - build-binaries - - docker-image + - create-release + strategy: + fail-fast: false + ## Build a maximum of 2 images concurrently based on matrix.dist + max-parallel: 2 + matrix: + dist: + - alpine + - debian steps: - ## Creates releases - - name: Create Release - uses: stacks-network/actions/stacks-core/release/create-releases@main + - name: Create Docker Image + uses: stacks-network/actions/stacks-core/release/docker-images@main with: node_tag: ${{ inputs.node_tag }} node_docker_tag: ${{ inputs.node_docker_tag }} @@ -142,7 +141,9 @@ jobs: signer_docker_tag: ${{ inputs.signer_docker_tag }} is_node_release: ${{ inputs.is_node_release }} is_signer_release: ${{ inputs.is_signer_release }} - GH_TOKEN: ${{ secrets.GH_TOKEN }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + dist: ${{ matrix.dist }} ## Create the downstream PR for the release branch to master,develop create-pr: From 766278fe0417c52026f1767d8b0b6bbff5fd54ef Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 18 Mar 2025 17:45:58 -0400 Subject: [PATCH 150/238] fix: only open `nonce_conn` once in `iterate_candidates` --- stackslib/src/core/mempool.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 971144de50e..ef52342854d 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1573,6 +1573,7 @@ impl MemPoolDB { debug!("Mempool walk for {}ms", settings.max_walk_time_ms,); let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); + let mut nonce_conn = self.reopen(true)?; // == Queries for `GlobalFeeRate` mempool walk strategy // @@ -1728,7 +1729,6 @@ impl MemPoolDB { state_changed = true; // Check the nonces. - let mut nonce_conn = self.reopen(false)?; let expected_origin_nonce = nonce_cache.get(&candidate.origin_address, clarity_tx, &mut nonce_conn); let expected_sponsor_nonce = @@ -1899,7 +1899,6 @@ impl MemPoolDB { // Flush the nonce cache to the database before performing the next // query. 
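// Aside: a minimal sketch, not from patch 150 itself, of the shape that fix
// gives `iterate_candidates`: one handle opened before the walk loop and reused
// for both the per-candidate nonce lookups and the cache flush, instead of
// calling `self.reopen(..)` on every iteration. The `nonces` table mirrors the
// one used above; everything else here is illustrative.
fn hoisted_connection_sketch() -> rusqlite::Result<()> {
    use rusqlite::{params, Connection};
    // opened once, before the loop -- stands in for `self.reopen(true)?`
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE nonces (address TEXT PRIMARY KEY, nonce INTEGER)",
        params![],
    )?;
    for i in 0..3_i64 {
        // every iteration reuses `conn` rather than opening a fresh handle
        conn.execute(
            "INSERT OR REPLACE INTO nonces (address, nonce) VALUES (?1, ?2)",
            params![format!("addr-{i}"), i],
        )?;
    }
    Ok(())
}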
-            let mut nonce_conn = self.reopen(true)?;
             nonce_cache.flush(&mut nonce_conn);
         };

From 5002ef6e6b939790d82b5170bb6465bb2f6ab233 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Tue, 18 Mar 2025 20:18:25 -0500
Subject: [PATCH 151/238] add miner inactivity handling to local state
 machine, lots of test assertions

---
 libsigner/src/events.rs                     |  52 +-
 stacks-signer/src/runloop.rs                |  63 +--
 stacks-signer/src/signerdb.rs               |  63 ++-
 stacks-signer/src/tests/chainstate.rs       |  13 +-
 stacks-signer/src/v0/signer.rs              |  22 +-
 stacks-signer/src/v0/signer_state.rs        | 230 +++++++--
 testnet/stacks-node/src/tests/signer/mod.rs | 285 ++++++++---
 testnet/stacks-node/src/tests/signer/v0.rs  | 498 ++++++++------------
 8 files changed, 785 insertions(+), 441 deletions(-)

diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs
index 0c1bcf63a3c..b0dc1b3bc83 100644
--- a/libsigner/src/events.rs
+++ b/libsigner/src/events.rs
@@ -32,6 +32,7 @@ use blockstack_lib::net::api::postblock_proposal::{
 use blockstack_lib::net::stackerdb::MINER_SLOT_COUNT;
 use blockstack_lib::util_lib::boot::boot_code_id;
 use blockstack_lib::version_string;
+use clarity::types::chainstate::StacksBlockId;
 use clarity::vm::types::serialization::SerializationError;
 use clarity::vm::types::QualifiedContractIdentifier;
 use serde::{Deserialize, Serialize};
@@ -202,13 +203,20 @@ pub enum SignerEvent<T: SignerEventTrait> {
         burn_height: u64,
         /// the burn hash for the newly processed burn block
         burn_header_hash: BurnchainHeaderHash,
+        /// the consensus hash for the newly processed burn block
+        consensus_hash: ConsensusHash,
         /// the time at which this event was received by the signer's event processor
         received_time: SystemTime,
     },
     /// A new processed Stacks block was received from the node with the given block hash
     NewBlock {
-        /// The block header hash for the newly processed stacks block
-        block_hash: Sha512Trunc256Sum,
+        /// The stacks block ID (or index block hash) of the new block
+        block_id: StacksBlockId,
+        /// The consensus hash of the block (either the tenure it was produced during for Stacks 3.0
+        /// or the burn block that won the sortition in Stacks 2.0)
+        consensus_hash: ConsensusHash,
+        /// The signer sighash for the newly processed stacks block
+        signer_sighash: Sha512Trunc256Sum,
         /// The block height for the newly processed stacks block
         block_height: u64,
     },
@@ -556,6 +564,7 @@ struct BurnBlockEvent {
     reward_recipients: Vec<serde_json::Value>,
     reward_slot_holders: Vec<String>,
     burn_amount: u64,
+    consensus_hash: String,
 }
 
 impl<T: SignerEventTrait> TryFrom<BurnBlockEvent> for SignerEvent<T> {
     type Error = EventError;
 
@@ -571,17 +580,29 @@ impl<T: SignerEventTrait> TryFrom<BurnBlockEvent> for SignerEvent<T> {
                     .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
             })?;
 
+        let consensus_hash = burn_block_event
+            .consensus_hash
+            .get(2..)
+            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
+            .and_then(|hex| {
+                ConsensusHash::from_hex(hex)
+                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
+            })?;
+
         Ok(SignerEvent::NewBurnBlock {
             burn_height: burn_block_event.burn_block_height,
             received_time: SystemTime::now(),
             burn_header_hash,
+            consensus_hash,
         })
     }
 }
 
 #[derive(Debug, Deserialize)]
 struct BlockEvent {
-    block_hash: String,
+    index_block_hash: String,
+    signer_signature_hash: String,
+    consensus_hash: String,
     block_height: u64,
 }
 
@@ -589,16 +610,35 @@ impl<T: SignerEventTrait> TryFrom<BlockEvent> for SignerEvent<T> {
     type Error = EventError;
 
     fn try_from(block_event: BlockEvent) -> Result<Self, Self::Error> {
-        let block_hash: Sha512Trunc256Sum = block_event
-            .block_hash
+        let signer_sighash = block_event
+            .signer_signature_hash
             .get(2..)
.ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
             .and_then(|hex| {
                 Sha512Trunc256Sum::from_hex(hex)
                     .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
             })?;
+        let consensus_hash = block_event
+            .consensus_hash
+            .get(2..)
+            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
+            .and_then(|hex| {
+                ConsensusHash::from_hex(hex)
+                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
+            })?;
+        let block_id = block_event
+            .index_block_hash
+            .get(2..)
+            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
+            .and_then(|hex| {
+                StacksBlockId::from_hex(hex)
+                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
+            })?;
+
         Ok(SignerEvent::NewBlock {
-            block_hash,
+            block_id,
+            signer_sighash,
+            consensus_hash,
             block_height: block_event.block_height,
         })
     }
 }
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 2d3bf12e989..2af3ca5155f 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -500,37 +500,6 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug>
             "Running one pass for the signer. state={:?}, event={event:?}",
             self.state
         );
-        // This is the only event that we respond to from the outer signer runloop
-        if let Some(SignerEvent::StatusCheck) = event {
-            info!("Signer status check requested: {:?}.", self.state);
-            if let Err(e) = res.send(vec![StateInfo {
-                runloop_state: self.state,
-                reward_cycle_info: self.current_reward_cycle_info,
-                running_signers: self
-                    .stacks_signers
-                    .values()
-                    .map(|s| s.reward_cycle())
-                    .collect(),
-                signer_state_machines: self
-                    .stacks_signers
-                    .iter()
-                    .map(|(reward_cycle, signer)| {
-                        let ConfiguredSigner::RegisteredSigner(ref signer) = signer else {
-                            return (*reward_cycle, None);
-                        };
-                        (
-                            *reward_cycle,
-                            Some(signer.get_local_state_machine().clone()),
-                        )
-                    })
-                    .collect(),
-            }
-            .into()])
-            {
-                error!("Failed to send status check result: {e}.");
-            }
-        }
-
         if self.state == State::Uninitialized {
             if let Err(e) = self.initialize_runloop() {
                 error!("Failed to initialize signer runloop: {e}.");
@@ -564,6 +533,38 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug>
                 current_reward_cycle,
             );
         }
+
+        // This is the only event that we respond to from the outer signer runloop
+        if let Some(SignerEvent::StatusCheck) = event {
+            info!("Signer status check requested: {:?}.", self.state);
+            if let Err(e) = res.send(vec![StateInfo {
+                runloop_state: self.state,
+                reward_cycle_info: self.current_reward_cycle_info,
+                running_signers: self
+                    .stacks_signers
+                    .values()
+                    .map(|s| s.reward_cycle())
+                    .collect(),
+                signer_state_machines: self
+                    .stacks_signers
+                    .iter()
+                    .map(|(reward_cycle, signer)| {
+                        let ConfiguredSigner::RegisteredSigner(ref signer) = signer else {
+                            return (*reward_cycle, None);
+                        };
+                        (
+                            *reward_cycle,
+                            Some(signer.get_local_state_machine().clone()),
+                        )
+                    })
+                    .collect(),
+            }
+            .into()])
+            {
+                error!("Failed to send status check result: {e}.");
+            }
+        }
+
         if self.state == State::NoRegisteredSigners && event.is_some() {
             let next_reward_cycle = current_reward_cycle.saturating_add(1);
             info!("Signer is not registered for the current reward cycle ({current_reward_cycle}).
Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5d166a3ecfb..40919bcf767 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -510,6 +510,15 @@ ALTER TABLE block_rejection_signer_addrs ADD COLUMN reject_code INTEGER; "#; +static ADD_CONSENSUS_HASH: &str = r#" +ALTER TABLE burn_blocks + ADD COLUMN consensus_hash TEXT; +"#; + +static ADD_CONSENSUS_HASH_INDEX: &str = r#" +CREATE INDEX IF NOT EXISTS burn_blocks_ch on burn_blocks (consensus_hash); +"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -579,9 +588,15 @@ static SCHEMA_9: &[&str] = &[ "INSERT INTO db_config (version) VALUES (9);", ]; +static SCHEMA_10: &[&str] = &[ + ADD_CONSENSUS_HASH, + ADD_CONSENSUS_HASH_INDEX, + "INSERT INTO db_config (version) VALUES (10);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 9; + pub const SCHEMA_VERSION: u32 = 10; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -723,7 +738,7 @@ impl SignerDb { Ok(()) } - /// Migrate from schema 9 to schema 9 + /// Migrate from schema 8 to schema 9 fn schema_9_migration(tx: &Transaction) -> Result<(), DBError> { if Self::get_schema_version(tx)? >= 9 { // no migration necessary @@ -737,6 +752,20 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 9 to schema 10 + fn schema_10_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? >= 10 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_10.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Register custom scalar functions used by the database fn register_scalar_functions(&self) -> Result<(), DBError> { // Register helper function for determining if a block is a tenure change transaction @@ -779,7 +808,8 @@ impl SignerDb { 6 => Self::schema_7_migration(&sql_tx)?, 7 => Self::schema_8_migration(&sql_tx)?, 8 => Self::schema_9_migration(&sql_tx)?, - 9 => break, + 9 => Self::schema_10_migration(&sql_tx)?, + 10 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -911,6 +941,7 @@ impl SignerDb { pub fn insert_burn_block( &mut self, burn_hash: &BurnchainHeaderHash, + consensus_hash: &ConsensusHash, burn_height: u64, received_time: &SystemTime, ) -> Result<(), DBError> { @@ -918,11 +949,12 @@ impl SignerDb { .duration_since(std::time::UNIX_EPOCH) .map_err(|e| DBError::Other(format!("Bad system time: {e}")))? 
.as_secs();
-        debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts);
+        debug!("Inserting burn block info"; "burn_block_height" => burn_height, "burn_hash" => %burn_hash, "received" => received_ts, "ch" => %consensus_hash);
         self.db.execute(
-            "INSERT OR REPLACE INTO burn_blocks (block_hash, block_height, received_time) VALUES (?1, ?2, ?3)",
+            "INSERT OR REPLACE INTO burn_blocks (block_hash, consensus_hash, block_height, received_time) VALUES (?1, ?2, ?3, ?4)",
             params![
                 burn_hash,
+                consensus_hash,
                 u64_to_sql(burn_height)?,
                 u64_to_sql(received_ts)?,
             ],
@@ -947,6 +979,23 @@ impl SignerDb {
         Ok(Some(receive_time))
     }
 
+    /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer
+    /// if that burn block has been received.
+    pub fn get_burn_block_receive_time_ch(
+        &self,
+        ch: &ConsensusHash,
+    ) -> Result<Option<u64>, DBError> {
+        let query = "SELECT received_time FROM burn_blocks WHERE consensus_hash = ? LIMIT 1";
+        let Some(receive_time_i64) = query_row::<i64, _>(&self.db, query, &[ch])? else {
+            return Ok(None);
+        };
+        let receive_time = u64::try_from(receive_time_i64).map_err(|e| {
+            error!("Failed to parse db received_time as u64: {e}");
+            DBError::Corruption
+        })?;
+        Ok(Some(receive_time))
+    }
+
     /// Insert or replace a block into the database.
     /// Preserves the `broadcast` column if replacing an existing block.
     pub fn insert_block(&mut self, block_info: &BlockInfo) -> Result<(), DBError> {
@@ -1497,12 +1546,14 @@ mod tests {
         let db_path = tmp_db_path();
         let mut db = SignerDb::new(db_path).expect("Failed to create signer db");
         let test_burn_hash = BurnchainHeaderHash([10; 32]);
+        let test_consensus_hash = ConsensusHash([13; 20]);
         let stime = SystemTime::now();
         let time_to_epoch = stime
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs();
-        db.insert_burn_block(&test_burn_hash, 10, &stime).unwrap();
+        db.insert_burn_block(&test_burn_hash, &test_consensus_hash, 10, &stime)
+            .unwrap();
 
         let stored_time = db
             .get_burn_block_receive_time(&test_burn_hash)
diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs
index 9ffabeed6c5..5fa2afc170b 100644
--- a/stacks-signer/src/tests/chainstate.rs
+++ b/stacks-signer/src/tests/chainstate.rs
@@ -251,7 +251,12 @@ fn reorg_timing_testing(
     let sortition_time = SystemTime::UNIX_EPOCH
         + Duration::from_secs(block_info_1.proposed_time + sortition_timing_secs);
     signer_db
-        .insert_burn_block(&view.cur_sortition.burn_block_hash, 3, &sortition_time)
+        .insert_burn_block(
+            &view.cur_sortition.burn_block_hash,
+            &view.cur_sortition.consensus_hash,
+            3,
+            &sortition_time,
+        )
         .unwrap();
 
     let MockServerClient {
@@ -385,10 +390,11 @@ fn check_block_proposal_timeout() {
 
     // Ensure we have a burn height to compare against
     let burn_hash = view.cur_sortition.burn_block_hash;
+    let consensus_hash = view.cur_sortition.consensus_hash;
     let burn_height = 1;
     let received_time = SystemTime::now();
     signer_db
-        .insert_burn_block(&burn_hash, burn_height, &received_time)
+        .insert_burn_block(&burn_hash, &consensus_hash, burn_height, &received_time)
         .unwrap();
 
     view.check_proposal(
@@ -456,10 +462,11 @@ fn check_sortition_timeout() {
     };
     // Ensure we have a burn height to compare against
     let burn_hash = sortition.burn_block_hash;
+    let consensus_hash = sortition.consensus_hash;
     let burn_height = 1;
     let received_time = SystemTime::now();
     signer_db
-        .insert_burn_block(&burn_hash, burn_height, &received_time)
+        .insert_burn_block(&burn_hash,
&consensus_hash, burn_height, &received_time)
         .unwrap();
 
     std::thread::sleep(Duration::from_secs(1));
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 0bf5956aaed..69314274200 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -308,11 +308,17 @@ impl SignerTrait<SignerMessage> for Signer {
             SignerEvent::NewBurnBlock {
                 burn_height,
                 burn_header_hash,
+                consensus_hash,
                 received_time,
             } => {
                 info!("{self}: Received a new burn block event for block height {burn_height}");
                 self.signer_db
-                    .insert_burn_block(burn_header_hash, *burn_height, received_time)
+                    .insert_burn_block(
+                        burn_header_hash,
+                        consensus_hash,
+                        *burn_height,
+                        received_time,
+                    )
                     .unwrap_or_else(|e| {
                         error!(
                             "Failed to write burn block event to signerdb";
@@ -328,17 +334,25 @@ impl SignerTrait<SignerMessage> for Signer {
                 *sortition_state = None;
             }
             SignerEvent::NewBlock {
-                block_hash,
                 block_height,
+                block_id,
+                consensus_hash,
+                signer_sighash,
             } => {
                 debug!(
                     "{self}: Received a new block event.";
-                    "block_hash" => %block_hash,
+                    "block_id" => %block_id,
+                    "signer_sighash" => %signer_sighash,
+                    "consensus_hash" => %consensus_hash,
                     "block_height" => block_height
                 );
+                self.local_state_machine
+                    .stacks_block_arrival(consensus_hash, *block_height, block_id)
+                    .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest stacks block arrival"; "err" => ?e));
+
                 if let Ok(Some(mut block_info)) = self
                     .signer_db
-                    .block_lookup(block_hash)
+                    .block_lookup(signer_sighash)
                     .inspect_err(|e| warn!("{self}: Failed to load block state: {e:?}"))
                 {
                     if block_info.state == BlockState::GloballyAccepted {
diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
index ba51d92850f..680e42d73b9 100644
--- a/stacks-signer/src/v0/signer_state.rs
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -13,15 +13,17 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+use std::time::{Duration, UNIX_EPOCH};
+
 use blockstack_lib::chainstate::burn::ConsensusHashExtensions;
 use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
 use serde::{Deserialize, Serialize};
-use slog::slog_warn;
+use slog::{slog_info, slog_warn};
 use stacks_common::bitvec::BitVec;
 use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash};
 use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum};
 use stacks_common::util::secp256k1::MessageSignature;
-use stacks_common::warn;
+use stacks_common::{info, warn};
 
 use crate::chainstate::{
     ProposalEvalConfig, SignerChainstateError, SortitionState, SortitionsView,
@@ -53,6 +55,8 @@ pub enum MinerState {
     ActiveMiner {
         /// The pubkeyhash of the current miner's signing key
         current_miner_pkh: Hash160,
+        /// The tenure ID of the current miner's active tenure
+        tenure_id: ConsensusHash,
         /// The tenure that the current miner is building on top of
         parent_tenure_id: ConsensusHash,
         /// The last block of the parent tenure (which should be
@@ -120,7 +124,7 @@ impl LocalStateMachine {
         proposal_config: &ProposalEvalConfig,
     ) -> Result<(), SignerChainstateError> {
         let LocalStateMachine::Pending { update, ..
} = self else {
-            return Ok(());
+            return self.check_miner_inactivity(db, client, proposal_config);
         };
         match update.clone() {
             StateMachineUpdate::BurnBlock(expected_burn_height) => {
         }
     }
 
+    fn is_timed_out(
+        sortition: &ConsensusHash,
+        db: &SignerDb,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<bool, SignerChainstateError> {
+        // if we've already signed a block in this tenure, the miner can't have timed out.
+        let has_block = db.has_signed_block_in_tenure(sortition)?;
+        if has_block {
+            return Ok(false);
+        }
+        let Some(received_ts) = db.get_burn_block_receive_time_ch(sortition)? else {
+            return Ok(false);
+        };
+        let received_time = UNIX_EPOCH + Duration::from_secs(received_ts);
+        let last_activity = db
+            .get_last_activity_time(sortition)?
+            .map(|time| UNIX_EPOCH + Duration::from_secs(time))
+            .unwrap_or(received_time);
+
+        let Ok(elapsed) = std::time::SystemTime::now().duration_since(last_activity) else {
+            return Ok(false);
+        };
+
+        if elapsed > proposal_config.block_proposal_timeout {
+            info!(
+                "Tenure miner was inactive too long and timed out";
+                "tenure_ch" => %sortition,
+                "elapsed_inactive" => elapsed.as_secs(),
+                "config_block_proposal_timeout" => proposal_config.block_proposal_timeout.as_secs()
+            );
+        }
+        Ok(elapsed > proposal_config.block_proposal_timeout)
+    }
+
+    fn check_miner_inactivity(
+        &mut self,
+        db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<(), SignerChainstateError> {
+        let Self::Initialized(ref mut state_machine) = self else {
+            // no inactivity if the state machine isn't initialized
+            return Ok(());
+        };
+
+        let MinerState::ActiveMiner { ref tenure_id, .. } = state_machine.current_miner else {
+            // no inactivity if there's no active miner
+            return Ok(());
+        };
+
+        if !Self::is_timed_out(tenure_id, db, proposal_config)? {
+            return Ok(());
+        }
+
+        // the tenure timed out, try to see if we can use the prior tenure instead
+        let CurrentAndLastSortition { last_sortition, .. } =
+            client.get_current_and_last_sortition()?;
+        let last_sortition = last_sortition
+            .map(SortitionState::try_from)
+            .transpose()
+            .ok()
+            .flatten();
+        let Some(last_sortition) = last_sortition else {
+            warn!("Current miner timed out due to inactivity, but could not find a valid prior miner. Allowing current miner to continue");
+            return Ok(());
+        };
+
+        if Self::is_tenure_valid(&last_sortition, db, client, proposal_config)? {
+            let new_active_tenure_ch = last_sortition.consensus_hash;
+            let inactive_tenure_ch = *tenure_id;
+            state_machine.current_miner =
+                Self::make_miner_state(last_sortition, client, db, proposal_config)?;
+            info!(
+                "Current tenure timed out, setting the active miner to the prior tenure";
+                "inactive_tenure_ch" => %inactive_tenure_ch,
+                "new_active_tenure_ch" => %new_active_tenure_ch
+            );
+
+            Ok(())
+        } else {
+            warn!("Current miner timed out due to inactivity, but prior miner is not valid.
Allowing current miner to continue");
+            Ok(())
+        }
+    }
+
     fn make_miner_state(
         sortition_to_set: SortitionState,
         client: &StacksClient,
@@ -177,6 +266,7 @@ impl LocalStateMachine {
 
         let miner_state = MinerState::ActiveMiner {
             current_miner_pkh: next_current_miner_pkh,
+            tenure_id: sortition_to_set.consensus_hash,
             parent_tenure_id: next_parent_tenure_id,
             parent_tenure_last_block,
             parent_tenure_last_block_height,
@@ -185,6 +275,107 @@ impl LocalStateMachine {
         Ok(miner_state)
     }
 
+    /// Handle a new stacks block arrival
+    pub fn stacks_block_arrival(
+        &mut self,
+        ch: &ConsensusHash,
+        height: u64,
+        block_id: &StacksBlockId,
+    ) -> Result<(), SignerChainstateError> {
+        // set self to uninitialized so that if this function errors,
+        // self is left as uninitialized.
+        let prior_state = std::mem::replace(self, Self::Uninitialized);
+        let mut prior_state_machine = match prior_state {
+            // if the local state machine was uninitialized, just initialize it
+            LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine,
+            LocalStateMachine::Uninitialized => {
+                // we don't need to update any state when we're uninitialized for new stacks block
+                // arrivals
+                return Ok(());
+            }
+            LocalStateMachine::Pending { update, prior } => {
+                // This works as long as the pending updates are only burn blocks,
+                // but if we have other kinds of pending updates, this logic will need
+                // to be changed.
+                match &update {
+                    StateMachineUpdate::BurnBlock(..) => {
+                        *self = LocalStateMachine::Pending { update, prior };
+                        return Ok(());
+                    }
+                }
+            }
+        };
+
+        let MinerState::ActiveMiner {
+            parent_tenure_id,
+            parent_tenure_last_block,
+            parent_tenure_last_block_height,
+            ..
+        } = &mut prior_state_machine.current_miner
+        else {
+            // if there's no valid miner, then we don't need to update any state for new stacks blocks
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        };
+
+        if parent_tenure_id != ch {
+            // if the new block isn't from the parent tenure, we don't need any updates
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        }
+
+        if height <= *parent_tenure_last_block_height {
+            // if the new block isn't higher than we already expected, we don't need any updates
+            *self = LocalStateMachine::Initialized(prior_state_machine);
+            return Ok(());
+        }
+
+        *parent_tenure_last_block = *block_id;
+        *parent_tenure_last_block_height = height;
+        *self = LocalStateMachine::Initialized(prior_state_machine);
+        Ok(())
+    }
+
+    /// check if the tenure defined by sortition state:
+    /// (1) chose an appropriate parent tenure
+    /// (2) has not "timed out"
+    fn is_tenure_valid(
+        sortition_state: &SortitionState,
+        signer_db: &SignerDb,
+        client: &StacksClient,
+        proposal_config: &ProposalEvalConfig,
+    ) -> Result<bool, SignerChainstateError> {
+        let standin_block = NakamotoBlock {
+            header: NakamotoBlockHeader {
+                version: 0,
+                chain_length: 0,
+                burn_spent: 0,
+                consensus_hash: sortition_state.consensus_hash,
+                parent_block_id: StacksBlockId::first_mined(),
+                tx_merkle_root: Sha512Trunc256Sum([0; 32]),
+                state_index_root: TrieHash([0; 32]),
+                timestamp: 0,
+                miner_signature: MessageSignature::empty(),
+                signer_signature: vec![],
+                pox_treatment: BitVec::ones(1).unwrap(),
+            },
+            txs: vec![],
+        };
+
+        let chose_good_parent = SortitionsView::check_parent_tenure_choice(
+            sortition_state,
+            &standin_block,
+            signer_db,
+            client,
+            &proposal_config.first_proposal_burn_block_timing,
+        )?;
+        if !chose_good_parent {
+            return Ok(false);
+        }
+        Self::is_timed_out(&sortition_state.consensus_hash,
signer_db, proposal_config)
+            .map(|timed_out| !timed_out)
+    }
+
     /// Handle a new bitcoin block arrival
     pub fn bitcoin_block_arrival(
         &mut self,
@@ -250,40 +441,13 @@ impl LocalStateMachine {
             )
         })?;
 
-        let standin_block = NakamotoBlock {
-            header: NakamotoBlockHeader {
-                version: 0,
-                chain_length: 0,
-                burn_spent: 0,
-                consensus_hash: cur_sortition.consensus_hash.clone(),
-                parent_block_id: StacksBlockId::first_mined(),
-                tx_merkle_root: Sha512Trunc256Sum([0; 32]),
-                state_index_root: TrieHash([0; 32]),
-                timestamp: 0,
-                miner_signature: MessageSignature::empty(),
-                signer_signature: vec![],
-                pox_treatment: BitVec::ones(1).unwrap(),
-            },
-            txs: vec![],
-        };
-        let is_current_valid = SortitionsView::check_parent_tenure_choice(
-            &cur_sortition,
-            &standin_block,
-            &db,
-            &client,
-            &proposal_config.first_proposal_burn_block_timing,
-        )?;
+        let is_current_valid = Self::is_tenure_valid(&cur_sortition, db, client, proposal_config)?;
 
         let miner_state = if is_current_valid {
             Self::make_miner_state(cur_sortition, client, db, proposal_config)?
         } else {
-            let is_last_valid = SortitionsView::check_parent_tenure_choice(
-                &last_sortition,
-                &standin_block,
-                &db,
-                &client,
-                &proposal_config.first_proposal_burn_block_timing,
-            )?;
+            let is_last_valid =
+                Self::is_tenure_valid(&last_sortition, db, client, proposal_config)?;
 
             if is_last_valid {
                 Self::make_miner_state(last_sortition, client, db, proposal_config)?
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index a9f27865660..87a54522ed4 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -50,8 +50,11 @@ use stacks_signer::runloop::{SignerResult, State, StateInfo};
 use stacks_signer::v0::signer_state::{LocalStateMachine, MinerState};
 use stacks_signer::{Signer, SpawnedSigner};
 
-use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for};
-use super::neon_integrations::get_sortition_info_ch;
+use super::make_stacks_transfer;
+use super::nakamoto_integrations::{
+    check_nakamoto_empty_block_heuristics, next_block_and, wait_for,
+};
+use super::neon_integrations::{get_account, get_sortition_info_ch, submit_tx_fallible};
 use crate::neon::Counters;
 use crate::run_loop::boot_nakamoto;
 use crate::tests::bitcoin_regtest::BitcoinCoreController;
@@ -295,6 +298,14 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> {
+    /// Mine a bitcoin block and wait for the burn block height to advance
+    pub fn mine_bitcoin_block(&mut self) {
+        let info = self.get_peer_info();
+        next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || {
+            Ok(get_chain_info(&self.running_nodes.conf).burn_block_height > info.burn_block_height)
+        })
+        .unwrap();
+    }
+
     /// Fetch the local signer state machine for all the signers,
     /// waiting until every signer has processed the latest burn block.
     /// Then, check that every signer's state machine corresponds to the
@@ -303,13 +314,153 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> {
+                info!("Evaluating Signer #{ix}"; "state_machine" => ?state_machine);
+
+            assert_eq!(state_machine.burn_block, info_cur.pox_consensus,);
+            assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,);
+            let MinerState::ActiveMiner {
+                current_miner_pkh,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+                ..
+ } = state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(Some(current_miner_pkh), sortition_latest.miner_pk_hash160); + assert_eq!(parent_tenure_id, sortition_prior.consensus_hash); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Fetch the local signer state machine for all the signers, + /// waiting until every signer has processed the latest burn block. + /// Then, check that every signer's state machine corresponds to the + /// latest burn block: + /// 1. Having an invalid miner + /// 2. The active miner is the winner of the prior sortition + pub fn check_signer_states_revert_to_prior(&mut self) { + let (state_machines, info_cur) = self.get_burn_updated_states(); + + let sortition_latest = + get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus); + let sortition_prior = get_sortition_info_ch( + &self.running_nodes.conf, + sortition_latest.last_sortition_ch.as_ref().unwrap(), + ); + + info!("Latest sortition: {sortition_latest:?}"); + info!("Prior sortition: {sortition_prior:?}"); + + let latest_block = self + .stacks_client + .get_tenure_tip(sortition_prior.stacks_parent_ch.as_ref().unwrap()) + .unwrap(); + let latest_block_id = StacksBlockId::new( + sortition_prior.stacks_parent_ch.as_ref().unwrap(), + &latest_block.block_hash(), + ); + + state_machines + .into_iter() + .enumerate() + .for_each(|(ix, state_machine)| { + let LocalStateMachine::Initialized(state_machine) = state_machine else { + error!("Local state machine was not initialized"); + panic!(); + }; + + info!("Evaluating Signer #{ix}"; "state_machine" => ?state_machine); + + assert_eq!(state_machine.burn_block, info_cur.pox_consensus,); + assert_eq!(state_machine.burn_block_height, info_cur.burn_block_height,); + let MinerState::ActiveMiner { + current_miner_pkh, + parent_tenure_id, + parent_tenure_last_block, + parent_tenure_last_block_height, + tenure_id, + } = state_machine.current_miner + else { + error!("State machine for Signer #{ix} did not have an active miner"); + panic!(); + }; + assert_eq!(tenure_id, sortition_prior.consensus_hash); + assert_eq!(Some(current_miner_pkh), sortition_prior.miner_pk_hash160); + assert_eq!(Some(parent_tenure_id), sortition_prior.stacks_parent_ch); + assert_eq!(parent_tenure_last_block, latest_block_id); + assert_eq!(parent_tenure_last_block_height, latest_block.height()); + }); + } + + /// Submit a stacks transfer just to trigger block production + pub fn submit_transfer_tx( + &mut self, + sender_sk: &StacksPrivateKey, + send_fee: u64, + send_amt: u64, + ) -> Result<(String, u64), String> { + let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind); + let sender_addr = to_addr(&sender_sk); + let sender_nonce = get_account(&http_origin, &sender_addr).nonce; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + self.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx_fallible(&http_origin, &transfer_tx).map(|resp| (resp, sender_nonce)) + } + + /// Get the local state machines and most recent peer info from the stacks-node, + /// waiting until all of the signers have updated their state machines to + /// reflect the most recent burn block. 
+    pub fn get_burn_updated_states(&mut self) -> (Vec<LocalStateMachine>, PeerInfo) {
         let info_cur = self.get_peer_info();
         let current_rc = self.get_current_reward_cycle();
         let mut states = Vec::with_capacity(0);
         wait_for(120, || {
             states = self.get_all_states();
             Ok(states.iter().enumerate().all(|(ix, signer_state)| {
-                let state_machine = signer_state
+                let Some(Some(state_machine)) = signer_state
                     .signer_state_machines
                     .iter()
                     .find_map(|(rc, state)| {
@@ -319,10 +470,10 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> {
 + Send + 'static, T: SignerEventTrait + 'static> SignerTest
 + Send + 'static, T: SignerEventTrait + 'static> SignerTest
 + Send + 'static, T: SignerEventTrait + 'static> SignerTest
 = accepting_reorg
            .iter()
            .map(|pk| {
@@ -418,33 +603,7 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> {
-                    state_machine.burn_block_height >= info_cur.burn_block_height
-                }))
-            })
-            .expect("Timed out while waiting to fetch local state machines from the signer set");
+        let (state_machines, info_cur) = self.get_burn_updated_states();
 
         let sortition_latest =
             get_sortition_info_ch(&self.running_nodes.conf, &info_cur.pox_consensus);
@@ -464,19 +623,10 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>> {
 + Send + 'static, T: SignerEventTrait + 'static> SignerTest
 + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()
     }
 
+    /// Get the signer public keys by directly computing them from this signer test's
+    /// signer private keys.
+    pub fn signer_test_pks(&self) -> Vec<StacksPublicKey> {
+        self.signer_stacks_private_keys
+            .iter()
+            .map(StacksPublicKey::from_private)
+            .collect()
+    }
+
     /// Get the signer public keys for the given reward cycle
     fn get_signer_public_keys(&self, reward_cycle: u64) -> Vec<StacksPublicKey> {
         let entries = self.get_reward_set_signers(reward_cycle);
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 7a9f69aef5d..5f0328971c6 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -69,7 +69,6 @@ use stacks_signer::client::{SignerSlotID, StackerDB};
 use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network};
 use stacks_signer::signerdb::SignerDb;
 use stacks_signer::v0::signer::TEST_REPEAT_PROPOSAL_RESPONSE;
-use stacks_signer::v0::signer_state::LocalStateMachine;
 use stacks_signer::v0::tests::{
     TEST_IGNORE_ALL_BLOCK_PROPOSALS, TEST_PAUSE_BLOCK_BROADCAST, TEST_REJECT_ALL_BLOCK_PROPOSAL,
     TEST_SKIP_BLOCK_BROADCAST, TEST_SKIP_SIGNER_CLEANUP, TEST_STALL_BLOCK_VALIDATION_SUBMISSION,
@@ -314,6 +313,7 @@ impl SignerTest<SpawnedSigner> {
         let reward_cycle = self.get_current_reward_cycle();
 
         self.mine_nakamoto_block(timeout, use_nakamoto_blocks_mined);
+
signer_test.check_signer_states_normal(); + info!("Commit op is submitted; unpause tenure B's block"); // Unpause the broadcast of Tenure B's block, do not submit commits. @@ -2115,6 +2120,13 @@ fn forked_tenure_testing( panic!(); }); + let signer_pks = signer_test.signer_test_pks(); + if expect_tenure_c { + signer_test.check_signer_states_reorg(&signer_pks, &[]); + } else { + signer_test.check_signer_states_reorg(&[], &signer_pks); + }; + // allow blocks B and C to be processed sleep_ms(1000); @@ -2182,6 +2194,12 @@ fn forked_tenure_testing( // Mine tenure D signer_test.mine_nakamoto_block(Duration::from_secs(60), false); + if expect_tenure_c { + signer_test.check_signer_states_normal(); + } else { + signer_test.check_signer_states_reorg(&signer_pks, &[]); + } + let tip_d = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); @@ -2266,6 +2284,7 @@ fn bitcoind_forking_test() { for i in 0..pre_fork_tenures { info!("Mining pre-fork tenure {} of {pre_fork_tenures}", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); } let pre_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2288,6 +2307,9 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(1); + // note, we should still have normal signer states! + signer_test.check_signer_states_normal(); + info!("Wait for block off of shallow fork"); TEST_MINE_STALL.set(true); @@ -2332,6 +2354,7 @@ fn bitcoind_forking_test() { }, ) .unwrap(); + signer_test.check_signer_states_normal_missed_sortition(); } let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2343,6 +2366,11 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + if i == 0 { + signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]); + } else { + signer_test.check_signer_states_normal(); + } } let pre_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2369,6 +2397,7 @@ fn bitcoind_forking_test() { .btc_regtest_controller .build_next_block(4); + signer_test.check_signer_states_normal(); info!("Wait for block off of deep fork"); let commits_submitted = signer_test @@ -2411,6 +2440,7 @@ fn bitcoind_forking_test() { }, ) .unwrap(); + signer_test.check_signer_states_normal_missed_sortition(); } let post_fork_2_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2422,6 +2452,11 @@ fn bitcoind_forking_test() { for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + if i == 0 { + signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]); + } else { + signer_test.check_signer_states_normal(); + } } let test_end_nonce = get_account(&http_origin, &miner_address).nonce; @@ -2487,6 +2522,8 @@ fn multiple_miners() { Duration::from_secs(30), ); + miners.signer_test.check_signer_states_normal(); + btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf_1); // for this test, there should be one block per tenure @@ -2652,6 +2689,7 @@ fn miner_forking() { .expect("Failed to mine BTC block."); miners.wait_for_chains(120); + miners.signer_test.check_signer_states_normal(); // make sure the tenure was won by RL1 verify_sortition_winner(&sortdb, &mining_pkh_1); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2698,6 +2736,9 @@ fn miner_forking() { miners 
.mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::Extended, 60) .expect("Failed to mine BTC block followed by tenure change tx."); + miners + .signer_test + .check_signer_states_reorg(&[], &miners.signer_test.signer_test_pks()); miners.wait_for_chains(120); // fetch the current sortition info let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2740,6 +2781,9 @@ fn miner_forking() { miners .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60) .expect("Failed to mine BTC block."); + miners + .signer_test + .check_signer_states_reorg(&miners.signer_test.signer_test_pks(), &[]); miners.submit_commit_miner_1(&sortdb); // unblock block mining let blocks_len = test_observer::get_blocks().len(); @@ -2756,6 +2800,7 @@ fn miner_forking() { miners .mine_bitcoin_blocks_and_confirm_with_test_observer(&sortdb, 1, 60) .expect("Failed to mine BTC block."); + miners.signer_test.check_signer_states_normal(); // fetch the current sortition info miners.wait_for_chains(120); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2874,10 +2919,9 @@ fn end_of_tenure() { TEST_VALIDATE_STALL.set(true); let proposals_before = proposed_blocks.load(Ordering::SeqCst); - let blocks_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + let info = signer_test.get_peer_info(); + let blocks_before = info.stacks_tip_height; - let info = get_chain_info(&signer_test.running_nodes.conf); - let start_height = info.stacks_tip_height; // submit a tx so that the miner will mine an extra block let sender_nonce = 0; let transfer_tx = make_stacks_transfer( @@ -2943,7 +2987,7 @@ fn end_of_tenure() { .expect("Timed out waiting for block to be mined"); let info = get_chain_info(&signer_test.running_nodes.conf); - assert_eq!(info.stacks_tip_height, start_height + 1); + assert_eq!(info.stacks_tip_height, blocks_before + 1); signer_test.shutdown(); } @@ -3102,6 +3146,7 @@ fn signers_broadcast_signed_blocks() { .clone(); let blocks_before = mined_blocks.load(Ordering::SeqCst); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); wait_for(30, || { let blocks_mined = mined_blocks.load(Ordering::SeqCst); @@ -3186,6 +3231,7 @@ fn tenure_extend_after_idle_signers() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("---- Waiting for a tenure extend ----"); @@ -3242,6 +3288,7 @@ fn tenure_extend_with_other_transactions() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("Pause miner so it doesn't propose a block before the tenure extend"); TEST_MINE_STALL.set(true); @@ -3347,6 +3394,7 @@ fn tenure_extend_after_idle_miner() { info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + signer_test.check_signer_states_normal(); info!("---- Start a new tenure but ignore block signatures so no timestamps are recorded ----"); let tip_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; @@ -3423,6 +3471,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { info!("---- Nakamoto booted, starting test ----"); let stacks_tip_height = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + 
signer_test.check_signer_states_normal(); info!("---- Waiting for a rejected tenure extend ----"); // Now, wait for a block with a tenure extend proposal from the miner, but ensure it is rejected. @@ -3838,11 +3887,9 @@ fn idle_tenure_extend_active_mining() { #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. Specifically: -/// - An empty sortition will cause the signers to mark a miner as misbehaving once a timeout is exceeded. +/// - An empty tenure will cause the signers to mark a miner as misbehaving once a timeout is exceeded. /// - The miner will stop trying to mine once it sees a threshold of signers reject the block -/// - The empty sortition will trigger the miner to attempt a tenure extend. -/// - Signers will accept the tenure extend and sign subsequent blocks built off the old sortition -fn empty_sortition() { +fn empty_tenure_delayed() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -3867,7 +3914,9 @@ fn empty_sortition() { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; }, - |_| {}, + |node_config| { + node_config.miner.block_commit_delay = Duration::from_secs(2); + }, None, None, ); @@ -3881,16 +3930,13 @@ fn empty_sortition() { let Counters { naka_mined_blocks: mined_blocks, naka_submitted_commits: submitted_commits, - naka_skip_commit_op: skip_commit_op, naka_rejected_blocks: rejected_blocks, .. } = signer_test.running_nodes.counters.clone(); - TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); - info!("------------------------- Test Mine Regular Tenure A -------------------------"); let commits_before = submitted_commits.load(Ordering::SeqCst); - // Mine a regular tenure + // Mine a regular tenure, but wait for commits to be submitted next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3900,11 +3946,14 @@ fn empty_sortition() { }, ) .unwrap(); + signer_test.check_signer_states_normal(); info!("------------------------- Test Mine Empty Tenure B -------------------------"); - info!("Pausing stacks block mining to trigger an empty sortition."); let blocks_before = mined_blocks.load(Ordering::SeqCst); let commits_before = submitted_commits.load(Ordering::SeqCst); + info!("Pausing stacks block proposal to force an empty tenure"); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); + // Start new Tenure B // In the next block, the miner should win the tenure next_block_and( @@ -3916,12 +3965,7 @@ fn empty_sortition() { }, ) .unwrap(); - - info!("Pausing stacks block proposal to force an empty tenure"); - TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk]); - - info!("Pausing commit op to prevent tenure C from starting..."); - skip_commit_op.set(true); + signer_test.check_signer_states_normal(); let blocks_after = mined_blocks.load(Ordering::SeqCst); assert_eq!(blocks_after, blocks_before); @@ -3942,6 +3986,8 @@ fn empty_sortition() { std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + signer_test.check_signer_states_revert_to_prior(); + TEST_BROADCAST_PROPOSAL_STALL.set(vec![]); info!("------------------------- Test Delayed Block is Rejected -------------------------"); @@ -4082,6 +4128,7 @@ fn empty_sortition_before_approval() { || Ok(proposed_blocks.load(Ordering::SeqCst) > proposed_before), ) .expect("Failed to mine tenure A and propose a block"); + signer_test.check_signer_states_normal(); info!("------------------------- Test Mine Empty Tenure B -------------------------"); @@ -4095,6 
+4142,7 @@ fn empty_sortition_before_approval() { }, ) .expect("Failed to mine empty tenure"); + signer_test.check_signer_states_normal_missed_sortition(); info!("Unpause block commits"); skip_commit_op.set(false); @@ -4226,8 +4274,7 @@ fn empty_sortition_before_proposal() { }) .expect("Failed to advance chain tip"); - // Sleep a bit more to ensure the signers see both burn blocks - sleep_ms(5_000); + signer_test.check_signer_states_normal_missed_sortition(); info!("Unpause miner"); TEST_MINE_STALL.set(false); @@ -4302,6 +4349,7 @@ fn empty_sortition_before_proposal() { &signer_test.running_nodes.coord_channel, ) .expect("Failed to mine a normal tenure after the tenure extend"); + signer_test.check_signer_states_normal(); info!("------------------------- Shutdown -------------------------"); @@ -4695,6 +4743,7 @@ fn signer_set_rollover() { ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); let block_sighash = mined_block.signer_signature_hash; let signer_signatures = mined_block.signer_signature; @@ -4769,6 +4818,7 @@ fn signer_set_rollover() { .expect("Timed out waiting for stacking txs to be mined"); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let next_reward_cycle = reward_cycle.saturating_add(1); @@ -4822,6 +4872,7 @@ fn signer_set_rollover() { ); submit_tx(&http_origin, &transfer_tx); signer_test.mine_nakamoto_block(short_timeout, true); + signer_test.check_signer_states_normal(); let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); info!("---- Verifying that the new signers signed the block -----"); @@ -5099,6 +5150,7 @@ fn multiple_miners_with_nakamoto_blocks() { &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); + miners.signer_test.check_signer_states_normal(); btc_blocks_mined += 1; // wait for the new block to be processed @@ -5193,7 +5245,6 @@ fn partial_tenure_fork() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -5252,7 +5303,6 @@ fn partial_tenure_fork() { Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); - let blocks_mined1 = signer_test.running_nodes.counters.naka_mined_blocks.clone(); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); @@ -5291,9 +5341,6 @@ fn partial_tenure_fork() { let rl2_coord_channels = run_loop_2.coordinator_channels(); let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let Counters { - naka_mined_blocks: blocks_mined2, - naka_proposed_blocks: blocks_proposed2, - naka_submitted_commits: commits_2, naka_skip_commit_op: rl2_skip_commit_op, .. 
} = run_loop_2.counters(); @@ -5301,13 +5348,16 @@ fn partial_tenure_fork() { let rl1_counters = signer_test.running_nodes.counters.clone(); signer_test.boot_to_epoch_3(); + + // Pause block commits from miner 2 to make sure + // miner 1 wins the first block + rl2_skip_commit_op.set(true); + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) .unwrap(); - let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - wait_for(200, || { let Some(node_1_info) = get_chain_info_opt(&conf) else { return Ok(false); @@ -5321,29 +5371,6 @@ fn partial_tenure_fork() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - // due to the random nature of mining sortitions, the way this test is structured - // is that we keep track of how many tenures each miner produced, and once enough sortitions - // have been produced such that each miner has produced 3 tenures, we stop and check the - // results at the end - let mut btc_blocks_mined = 0; - let mut miner_1_tenures = 0u64; - let mut miner_2_tenures = 0u64; - let mut fork_initiated = false; - let mut min_miner_1_tenures = u64::MAX; - let mut min_miner_2_tenures = u64::MAX; - let mut ignore_block = 0; - - let mut miner_1_blocks = 0; - let mut miner_2_blocks = 0; - let mut min_miner_2_blocks = 0; - let mut last_sortition_winner: Option = None; - let mut miner_2_won_2_in_a_row = false; - - let commits_1 = signer_test - .running_nodes - .counters - .naka_submitted_commits - .clone(); let rl1_skip_commit_op = signer_test .running_nodes .counters @@ -5370,278 +5397,159 @@ fn partial_tenure_fork() { info!("-------- Miner 2 caught up to miner 1 --------"); - // Pause block commits - rl1_skip_commit_op.set(true); - rl2_skip_commit_op.set(true); - - let info_before = get_chain_info(&conf); - - // Mine the first block - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 180, - || { - let info_1 = get_chain_info(&conf); - Ok(info_1.stacks_tip_height > info_before.stacks_tip_height) - }, - ) - .expect("Timed out waiting for new Stacks block to be mined"); - - info!("-------- Mined first block, wait for block commits --------"); - let info_before = get_chain_info(&conf); - // Unpause block commits and wait for both miners' commits - rl1_skip_commit_op.set(false); - rl2_skip_commit_op.set(false); + info!("-------- Miner 1 starting next tenure --------"); - // Ensure that both miners' commits point at the stacks tip wait_for(60, || { - let last_committed_1 = rl1_counters - .naka_submitted_commit_last_stacks_tip - .load(Ordering::SeqCst); - let last_committed_2 = rl2_counters - .naka_submitted_commit_last_stacks_tip - .load(Ordering::SeqCst); - Ok(last_committed_1 >= info_before.stacks_tip_height - && last_committed_2 >= info_before.stacks_tip_height) + Ok(rl1_counters.naka_submitted_commit_last_burn_height.get() + >= info_before.burn_block_height) }) - .expect("Timed out waiting for block commits"); + .unwrap(); + info!("-------- Blocking Miner 1 so that Miner 2 will win the next next tenure --------"); + rl1_skip_commit_op.set(true); - while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { - if btc_blocks_mined >= max_nakamoto_tenures { - panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); - } + // Mine the first block + signer_test.mine_bitcoin_block(); + signer_test.check_signer_states_normal(); - // Mine a block and wait for it to be processed, unless we are in 
a - // forked tenure, in which case, just wait for the block proposal - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); - let proposed_before_1 = signer_test - .running_nodes - .counters - .naka_proposed_blocks - .load(Ordering::SeqCst); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1)); + + // Setup miner 2 to ignore a block in this tenure + let ignore_block = info_before.stacks_tip_height + 3; + set_ignore_block(ignore_block, &conf_node_2.node.working_dir); + // mine the interim blocks + for interim_block_ix in 0..inter_blocks_per_tenure { info!( - "Next tenure checking"; - "fork_initiated?" => fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, + "Mining interim block #{interim_block_ix} in Miner 1's first tenure (the to-be-forked tenure)"; ); - // Pause block commits - rl1_skip_commit_op.set(true); - rl2_skip_commit_op.set(true); - - let tip_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let commits_before_1 = commits_1.load(Ordering::SeqCst); - let commits_before_2 = commits_2.load(Ordering::SeqCst); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - - Ok((fork_initiated && proposed_2 > proposed_before_2) - || mined_1 > mined_before_1 - || mined_2 > mined_before_2) - }, - ) - .expect("Timed out waiting for tenure change Stacks block"); - btc_blocks_mined += 1; - - // Unpause block commits - info!("Unpausing block commits"); - rl1_skip_commit_op.set(false); - rl2_skip_commit_op.set(false); + let (_, sender_nonce) = signer_test + .submit_transfer_tx(&sender_sk, send_fee, send_amt) + .unwrap(); - // Wait for the block to be processed and the block commits to be submitted wait_for(60, || { - let tip_after = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - // Ensure that both block commits have been sent before continuing - let commits_after_1 = commits_1.load(Ordering::SeqCst); - let commits_after_2 = commits_2.load(Ordering::SeqCst); - Ok(commits_after_1 > commits_before_1 - && commits_after_2 > commits_before_2 - && tip_after.consensus_hash != tip_before.consensus_hash) + Ok(get_account(&http_origin, &sender_addr).nonce > sender_nonce) }) - .expect("Sortition DB tip did not change"); - - let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - info!("tip_after: {:?}", tip_sn); - let miner = match tip_sn.miner_pk_hash { - Some(pk_hash) => { - if pk_hash == mining_pkh_1 { - 1 - } else { - 2 - } - } - None => { - panic!("No sortition found"); - } - }; - info!("Next tenure mined by miner {miner}"); + .unwrap(); + } - if let Some(last_sortition_winner) = last_sortition_winner { - if last_sortition_winner == miner && miner == 2 { - miner_2_won_2_in_a_row = true; - } else { - miner_2_won_2_in_a_row = false; - } - } - last_sortition_winner = Some(miner); - - if miner == 1 && miner_1_tenures == 0 
{ - // Setup miner 2 to ignore a block in this tenure - ignore_block = pre_nakamoto_peer_1_height - + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) - + 3; - set_ignore_block(ignore_block, &conf_node_2.node.working_dir); - - // Ensure that miner 2 runs at least one more tenure - min_miner_2_tenures = miner_2_tenures + 1; - fork_initiated = true; - min_miner_2_blocks = miner_2_blocks; - } - if miner == 2 && miner_2_tenures == min_miner_2_tenures { - // This is the forking tenure. Ensure that miner 1 runs one more - // tenure after this to validate that it continues to build off of - // the proper block. - min_miner_1_tenures = miner_1_tenures + 1; - } + info!("------- Unblocking Miner 2 ------"); + rl2_skip_commit_op.set(false); + wait_for(60, || { + Ok(rl2_counters.naka_submitted_commit_last_burn_height.get() + > info_before.burn_block_height + && rl2_counters.naka_submitted_commit_last_stacks_tip.get() + > info_before.stacks_tip_height) + }) + .unwrap(); + let proposals_before = rl2_counters.naka_proposed_blocks.get(); + let rejections_before = rl2_counters.naka_rejected_blocks.get(); + let peer_info_before = signer_test.get_peer_info(); + info!("------- Miner 2 wins first tenure post-fork ------"); + signer_test.mine_bitcoin_block(); + // Miner 2's tenure is "normal", even though it will end up being rejected by signers because miner 2 + // is trying to reorg Miner 1's tenure + signer_test.check_signer_states_normal(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2)); - let mut blocks = inter_blocks_per_tenure; - // mine (or attempt to mine) the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_before_2 = blocks_proposed2.load(Ordering::SeqCst); + wait_for(60, || { + Ok(rl2_counters.naka_proposed_blocks.get() > proposals_before + && rl2_counters.naka_rejected_blocks.get() > rejections_before) + }) + .expect("Miner 2 should propose blocks that get rejected"); - info!( - "Mining interim blocks"; - "fork_initiated?" 
=> fork_initiated,
- "miner_1_tenures" => miner_1_tenures,
- "miner_2_tenures" => miner_2_tenures,
- "min_miner_1_tenures" => min_miner_2_tenures,
- "min_miner_2_tenures" => min_miner_2_tenures,
- "proposed_before_2" => proposed_before_2,
- "mined_before_1" => mined_before_1,
- "mined_before_2" => mined_before_2,
- );
+ let peer_info = signer_test.get_peer_info();
+ assert_eq!(
+ peer_info.stacks_tip_height,
+ peer_info_before.stacks_tip_height
+ );
+ wait_for(60, || {
+ Ok(
+ rl2_counters.naka_submitted_commit_last_burn_height.get()
+ >= peer_info.burn_block_height,
+ )
+ })
+ .unwrap();

- // submit a tx so that the miner will mine an extra block
- let sender_nonce = (btc_blocks_mined - 1) * inter_blocks_per_tenure + interim_block_ix;
- let transfer_tx = make_stacks_transfer(
- &sender_sk,
- sender_nonce,
- send_fee,
- signer_test.running_nodes.conf.burnchain.chain_id,
- &recipient,
- send_amt,
- );
- // This may fail if the forking miner wins too many tenures and this account's
- // nonces get too high (TooMuchChaining)
- match submit_tx_fallible(&http_origin, &transfer_tx) {
- Ok(_) => {
- wait_for(60, || {
- let mined_1 = blocks_mined1.load(Ordering::SeqCst);
- let mined_2 = blocks_mined2.load(Ordering::SeqCst);
- let proposed_2 = blocks_proposed2.load(Ordering::SeqCst);
-
- Ok((fork_initiated && proposed_2 > proposed_before_2)
- || mined_1 > mined_before_1
- || mined_2 > mined_before_2
- // Special case where neither miner can mine a block:
- || (fork_initiated && miner_2_won_2_in_a_row))
- })
- .expect("Timed out waiting for interim block to be mined");
- }
- Err(e) => {
- if e.to_string().contains("TooMuchChaining") {
- info!("TooMuchChaining error, skipping block");
- blocks = interim_block_ix;
- break;
- } else {
- panic!("Failed to submit tx: {e}");
- }
- }
- }
- info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}");
- }
+ info!("------- Miner 2 wins second tenure post-fork ------");
+ rl2_skip_commit_op.set(true);
+ signer_test.mine_bitcoin_block();
+ info!("------- Unblocking Miner 1 so they can win the next tenure ------");
+ rl1_skip_commit_op.set(false);

- if miner == 1 {
- miner_1_tenures += 1;
- miner_1_blocks += blocks;
- } else {
- miner_2_tenures += 1;
- miner_2_blocks += blocks;
- }
+ // Miner 2's tenure is an allowed reorg because the prior tenure had no blocks
+ signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]);
+ let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2));
+
+ let peer_info = signer_test.get_peer_info();
+ assert_eq!(
+ peer_info.stacks_tip_height,
+ peer_info_before.stacks_tip_height
+ );
+ wait_for(60, || {
+ Ok(
+ rl1_counters.naka_submitted_commit_last_burn_height.get()
+ >= peer_info.burn_block_height,
+ )
+ })
+ .unwrap();

- let mined_1 = blocks_mined1.load(Ordering::SeqCst);
- let mined_2 = blocks_mined2.load(Ordering::SeqCst);
+ rl1_skip_commit_op.set(true);
+ info!("------- Miner 1 wins the third tenure post-fork ------");
+ signer_test.mine_bitcoin_block();
+ info!("------- Unblocking Miner 2 so they can win the next tenure ------");
+ rl2_skip_commit_op.set(false);
+ signer_test.check_signer_states_reorg(&signer_test.signer_test_pks(), &[]);
+ let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1));
+ for interim_block_ix in 0..inter_blocks_per_tenure {
 info!(
- "Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}, Miner 1 before: {mined_before_1}, Miner 2 before: {mined_before_2}, Miner 1 blocks: {mined_1}, Miner 2 blocks: {mined_2}",
+ "Mining interim block #{interim_block_ix} in Miner 1's third tenure post-fork";
 );
- if miner == 1 {
- assert_eq!(mined_1, mined_before_1 + blocks + 1);
- } else if miner_2_tenures < min_miner_2_tenures {
- assert_eq!(mined_2, mined_before_2 + blocks + 1);
- } else {
- // Miner 2 should have mined 0 blocks after the fork
- assert_eq!(mined_2, mined_before_2);
- }
+ let (_, sender_nonce) = signer_test
+ .submit_transfer_tx(&sender_sk, send_fee, send_amt)
+ .unwrap();
+
+ wait_for(60, || {
+ Ok(get_account(&http_origin, &sender_addr).nonce > sender_nonce)
+ })
+ .unwrap();
 }

- info!(
- "New chain info 1: {:?}",
- get_chain_info(&signer_test.running_nodes.conf)
- );
+ info!("------- Miner 2 wins the fourth tenure post-fork ------");
+ let proposals_before = rl2_counters.naka_proposed_blocks.get();
+ let mined_before = rl2_counters.naka_mined_blocks.get();
+ let peer_info_before = signer_test.get_peer_info();
+ signer_test.mine_bitcoin_block();
+ // now, miner 2 is reorging an entire miner 1 tenure, which should lead
+ // the signer set to treat miner 2's reorg as rejected.
+ signer_test.check_signer_states_reorg(&[], &signer_test.signer_test_pks());
+ let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_2));

- info!("New chain info 2: {:?}", get_chain_info(&conf_node_2));
+ wait_for(60, || {
+ Ok(rl2_counters.naka_proposed_blocks.get() > proposals_before)
+ })
+ .expect("Miner 2 should propose blocks that get rejected");

- let peer_1_height = get_chain_info(&conf).stacks_tip_height;
- let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height;
- assert_eq!(peer_2_height, ignore_block - 1);
- // The height may be higher than expected due to extra transactions waiting
- // to be mined during the forking miner's tenure.
- // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 - // before the fork was initiated - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); - assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + wait_for(120, || { + Ok(signer_test.get_peer_info().stacks_tip_height > peer_info_before.stacks_tip_height) + }) + .expect("Miner 1 should submit a tenure extend and have it globally accepted"); - let sortdb = SortitionDB::open( - &conf_node_2.get_burn_db_file_path(), - false, - conf_node_2.get_burnchain().pox_constants, - ) - .unwrap(); + assert_eq!( + mined_before, + rl2_counters.naka_mined_blocks.get(), + "Miner 2 should not have mined any new blocks" + ); - let (chainstate, _) = StacksChainState::open( - false, - conf_node_2.burnchain.chain_id, - &conf_node_2.get_chainstate_path_str(), - None, - ) - .unwrap(); - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - assert_eq!(tip.stacks_block_height, ignore_block - 1); rl2_coord_channels .lock() .expect("Mutex poisoned") From 815a175e136fc6463f539ab9d7021fbafe6e9e1e Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 14 Mar 2025 13:01:15 +0100 Subject: [PATCH 152/238] chore: update contributing.md adding details about a product oriented changelog, #5939 --- CONTRIBUTING.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c79fc286c8..577d417c2c5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -48,7 +48,7 @@ For an example of this process, see PRs ### Documentation Updates -- Any major changes should be added to the [CHANGELOG](CHANGELOG.md). +- Any major changes should be added to the [CHANGELOG](CHANGELOG.md)[*]. - Mention any required documentation changes in the description of your pull request. - If adding or updating an RPC endpoint, ensure the change is documented in the OpenAPI spec: [`./docs/rpc/openapi.yaml`](./docs/rpc/openapi.yaml). @@ -56,6 +56,9 @@ For an example of this process, see PRs test, module, function, etc.), each should be documented according to our [coding guidelines](#Coding-Guidelines). +> [*] The Changelog focuses on product changes. A "major change" refers to updates that have a direct impact on the end user, such as introducing new features, modifying existing functionality, or optimizing runtime performance. +On the other hand, changes that do not need to be reflected in the Changelog include code refactoring, writing tests, or automating processes, as these do not directly affect the user experience. + ## Git Commit Messages Aim to use descriptive git commit messages. We try to follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/). 
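+For example, a user-facing change might use a commit message like `fix: correct the commit amount asserted during RBF` (and warrant a CHANGELOG entry), while test-only work might use `test: cover signer state reverts on empty tenures` (and, per the note above, would not need one).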
From b2fe44176c36cb9e360d9b38f4aa3e6a0668cad0 Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Wed, 19 Mar 2025 07:28:23 -0700
Subject: [PATCH 153/238] fix: assertion for commit amount in rbf

---
 testnet/stacks-node/src/nakamoto_node/relayer.rs | 8 +++++---
 testnet/stacks-node/src/run_loop/neon.rs | 6 ++++++
 testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 ++++++
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs
index f35079abde9..8e3d6ca6e32 100644
--- a/testnet/stacks-node/src/nakamoto_node/relayer.rs
+++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs
@@ -1682,9 +1682,11 @@ impl RelayerThread {
 
 // update local state
 last_committed.set_txid(&txid);
- self.globals
- .counters
- .bump_naka_submitted_commits(last_committed.burn_tip.block_height, tip_height);
+ self.globals.counters.bump_naka_submitted_commits(
+ last_committed.burn_tip.block_height,
+ tip_height,
+ last_committed.block_commit.burn_fee,
+ );
 self.last_committed = Some(last_committed);
 Ok(())
diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs
index f7effad9ba4..f5c045b6a5c 100644
--- a/testnet/stacks-node/src/run_loop/neon.rs
+++ b/testnet/stacks-node/src/run_loop/neon.rs
@@ -117,6 +117,7 @@ pub struct Counters {
 pub naka_signer_pushed_blocks: RunLoopCounter,
 pub naka_miner_directives: RunLoopCounter,
 pub naka_submitted_commit_last_stacks_tip: RunLoopCounter,
+ pub naka_submitted_commit_last_commit_amount: RunLoopCounter,
 
 pub naka_miner_current_rejections: RunLoopCounter,
 pub naka_miner_current_rejections_timeout_secs: RunLoopCounter,
@@ -178,6 +179,7 @@ impl Counters {
 &self,
 committed_burn_height: u64,
 committed_stacks_height: u64,
+ committed_sats_amount: u64,
 ) {
 Counters::inc(&self.naka_submitted_commits);
 Counters::set(
@@ -188,6 +190,10 @@ impl Counters {
 &self.naka_submitted_commit_last_stacks_tip,
 committed_stacks_height,
 );
+ Counters::set(
+ &self.naka_submitted_commit_last_commit_amount,
+ committed_sats_amount,
+ );
 }
 
 pub fn bump_naka_mined_blocks(&self) {
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 3bfbf35e88d..b6b4bb76451 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -11355,6 +11355,8 @@ fn rbf_on_config_change() {
 
 let commits_before = counters.naka_submitted_commits.get();
 
+ let commit_amount_before = counters.naka_submitted_commit_last_commit_amount.get();
+
 info!("---- Updating config ----");
 update_config(155000, 57);
 
@@ -11365,6 +11367,10 @@ fn rbf_on_config_change() {
 })
 .expect("Expected new commit after config change");
 
+ let commit_amount_after = counters.naka_submitted_commit_last_commit_amount.get();
+ assert_eq!(commit_amount_after, 155000);
+ assert_ne!(commit_amount_after, commit_amount_before);
+
 coord_channel
 .lock()
 .expect("Mutex poisoned")
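The counter added above gives integration tests a direct view of the sats attached to the miner's most recent block-commit, so RBF behavior can be asserted without inspecting bitcoind. A minimal sketch of the read pattern, assuming a `Counters` handle named `counters` as in `rbf_on_config_change`:

    // Snapshot the amount attached to the last submitted block-commit.
    let amount_before = counters.naka_submitted_commit_last_commit_amount.get();
    // ... raise burn_fee_cap (e.g. to 155_000) and wait for the miner to RBF ...
    let amount_after = counters.naka_submitted_commit_last_commit_amount.get();
    assert_eq!(amount_after, 155_000);
    assert_ne!(amount_after, amount_before);
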
From 33b79cc8d2dc31a7a12248b7e232ac67731e5c38 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Wed, 19 Mar 2025 15:50:34 +0100
Subject: [PATCH 154/238] added integration test

---
 clarity/src/vm/clarity.rs | 9 ++
 testnet/stacks-node/src/tests/signer/v0.rs | 150 ++++++++++-----------
 2 files changed, 82 insertions(+), 77 deletions(-)

diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs
index a2458484e8c..43520449ea7 100644
--- a/clarity/src/vm/clarity.rs
+++ b/clarity/src/vm/clarity.rs
@@ -61,6 +61,9 @@ impl From<CheckErrors> for Error {
 CheckErrors::MemoryBalanceExceeded(_a, _b) => {
 Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
 }
+ CheckErrors::ExecutionTimeExpired => {
+ Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
+ }
 _ => Error::Analysis(e),
 }
 }
@@ -75,6 +78,9 @@ impl From<InterpreterError> for Error {
 InterpreterError::Unchecked(CheckErrors::CostOverflow) => {
 Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
 }
+ InterpreterError::Unchecked(CheckErrors::ExecutionTimeExpired) => {
+ Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
+ }
 _ => Error::Interpreter(e),
 }
 }
@@ -90,6 +96,9 @@ impl From<ParseErrors> for Error {
 ParseErrors::MemoryBalanceExceeded(_a, _b) => {
 Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
 }
+ ParseErrors::ExecutionTimeExpired => {
+ Error::CostError(ExecutionCost::max_value(), ExecutionCost::max_value())
+ }
 _ => Error::Parse(e),
 }
 }
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 61ce7d42fc3..39e718c73fb 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -955,6 +955,20 @@ impl MultipleMinerTest {
 self.signer_test.shutdown();
 }
 
+ pub fn wait_for_test_observer_blocks(&self, timeout_secs: u64) {
+ let block_header_hash_tip = format!("0x{}", self.get_peer_stacks_tip().to_hex());
+
+ wait_for(timeout_secs, || {
+ for block in test_observer::get_blocks().iter().rev() {
+ if block["block_hash"].as_str().unwrap() == block_header_hash_tip {
+ return Ok(true);
+ }
+ }
+ Ok(false)
+ })
+ .expect("Timed out waiting for test_observer blocks");
+ }
+
 /// Wait for both miners to have the same stacks tip height
 pub fn wait_for_chains(&self, timeout_secs: u64) {
 wait_for(timeout_secs, || {
@@ -992,11 +1006,10 @@ fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool {
 }
 }
 
-// Returns whether the last block in the test observer contains a tenure change
-/// transaction with the given cause.
+/// Check if a txid exists in the last block
 fn last_block_contains_txid(txid: &str) -> bool {
 let blocks = test_observer::get_blocks();
- let last_block = &blocks.last().unwrap();
+ let last_block = blocks.last().unwrap();
 let transactions = last_block["transactions"].as_array().unwrap();
 for tx in transactions {
 let raw_tx = tx["raw_tx"].as_str().unwrap();
@@ -12514,15 +12527,14 @@ fn signer_can_accept_rejected_block() {
 }
 
 /// Test a scenario where:
-/// Two miners boot to Nakamoto.
+/// Two miners boot to Nakamoto (first miner has max_execution_time set to 0).
 /// Sortition occurs. Miner 1 wins.
-/// Miner 1 proposes a block N
-/// Signers accept and the stacks tip advances to N
-/// Miner 1's block commits are paused so it cannot confirm the next tenure.
+/// Miner 1 successfully mines block N with contract-publish
+/// Miner 1 successfully mines block N+1 with transfer and a contract-call that gets rejected (by max_execution_time)
+/// Miner 1 successfully mines block N+2 with transfer tx (this is mainly for ensuring everything still works after the expiration time)
 /// Sortition occurs. Miner 2 wins.
-/// Miner 2 successfully mines blocks N+1, N+2, and N+3
-/// Sortition occurs quickly, within first_proposal_burn_block_timing_secs. Miner 1 wins.
-/// Miner 1 proposes block N+1' but gets rejected as more than one block has been mined in the current tenure (by miner2) +/// Miner 2 successfully mines block N+3 including the contract-call previously rejected by miner 1 +/// Ensures both the miners are aligned #[test] #[ignore] fn miner_rejection_by_contract_call_execution_time_expired() { @@ -12555,7 +12567,7 @@ fn miner_rejection_by_contract_call_execution_time_expired() { let (conf_1, _) = miners.get_node_configs(); let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); - let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); + let (_miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); info!("------------------------- Pause Miner 2's Block Commits -------------------------"); @@ -12570,100 +12582,84 @@ fn miner_rejection_by_contract_call_execution_time_expired() { info!("------------------------- Pause Miner 1's Block Commits -------------------------"); rl1_skip_commit_op.set(true); - // First, lets deploy the contract - let dummy_contract_src = " - (define-public (run-f) - (ok (1))) - "; - info!("------------------------- Miner 1 Mines a Nakamoto Block N -------------------------"); miners .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) .expect("Failed to mine BTC block followed by Block N"); + miners.wait_for_test_observer_blocks(60); + + miners.send_fee = 300; // First, lets deploy the contract - let _contract_publish_txid = - miners.send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60); + let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))"; + + let contract_publish_txid = miners + .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .expect("Failed to publish contract in a new block"); + + miners.wait_for_test_observer_blocks(60); + + assert_eq!(last_block_contains_txid(&contract_publish_txid), true); + + info!("------------------------- Miner 1 Mines a Nakamoto Block N+1 -------------------------"); let stacks_height_before = miners.get_peer_stacks_tip_height(); - // try calling it (has to fail) - let contract_call_txid = miners.send_contract_call("dummy-contract", "f", &[]); + let tx1 = miners.send_transfer_tx(); - let miner_1_block_n = - wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_1) - .expect("Failed to get block N+1"); + // try calling the contract (has to fail) + let contract_call_txid = + miners.send_contract_call("dummy-contract", "dummy", &[clarity::vm::Value::UInt(1)]); + let _ = wait_for(60, || { + Ok(miners.get_peer_stacks_tip_height() > stacks_height_before) + }); + + miners.wait_for_test_observer_blocks(60); + + assert_eq!(last_block_contains_txid(&tx1), true); assert_eq!(last_block_contains_txid(&contract_call_txid), false); - // assure we have a successful sortition that miner 1 won + info!("------------------------- Miner 1 Mines a Nakamoto Block N+2 -------------------------"); + + miners.sender_nonce -= 1; + + let tx2 = miners + .send_and_mine_transfer_tx(60) + .expect("Failed to mine N + 2"); + + miners.wait_for_test_observer_blocks(60); + + assert_eq!(last_block_contains_txid(&tx2), true); + verify_sortition_winner(&sortdb, &miner_pkh_1); info!("------------------------- Miner 2 Submits a Block Commit -------------------------"); miners.submit_commit_miner_2(&sortdb); - info!("------------------------- Pause Miner 2's Block Mining -------------------------"); - TEST_MINE_STALL.set(true); - info!("------------------------- Mine 
Tenure -------------------------"); miners - .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60) - .expect("Failed to mine BTC block"); - - info!("------------------------- Miner 1 Submits a Block Commit -------------------------"); - miners.submit_commit_miner_1(&sortdb); + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block followed by Block N+3"); - info!("------------------------- Miner 2 Mines Block N+2 -------------------------"); + info!("------------------------- Miner 2 Mines Block N+3 -------------------------"); - TEST_MINE_STALL.set(false); - let _ = wait_for_block_pushed_by_miner_key(30, block_n_height + 1, &miner_pk_2) - .expect("Failed to get block N+1"); + let stacks_height_before = miners.get_peer_stacks_tip_height(); - // assure we have a successful sortition that miner 2 won - verify_sortition_winner(&sortdb, &miner_pkh_2); + let contract_call_txid = + miners.send_contract_call("dummy-contract", "dummy", &[clarity::vm::Value::UInt(1)]); - assert_eq!( - get_chain_info(&conf_1).stacks_tip_height, - block_n_height + 1 - ); - - info!("------------------------- Miner 2 Mines N+2 and N+3 -------------------------"); - miners - .send_and_mine_transfer_tx(30) - .expect("Failed to send and mine transfer tx"); - miners - .send_and_mine_transfer_tx(30) - .expect("Failed to send and mine transfer tx"); - assert_eq!( - get_chain_info(&conf_1).stacks_tip_height, - block_n_height + 3 - ); + let _ = wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_2) + .expect("Failed to get block N+3"); - info!("------------------------- Miner 1 Wins the Next Tenure, Mines N+3 -------------------------"); - miners.btc_regtest_controller_mut().build_next_block(1); + miners.wait_for_test_observer_blocks(60); - let _ = wait_for_block_pushed_by_miner_key(30, block_n_height + 1, &miner_pk_2) - .expect("Failed to get block N+3"); + assert_eq!(last_block_contains_txid(&contract_call_txid), true); - // check N+2 contains the contract call (previously rejected by miner 1) - let miner1_blocks_after_boot_to_epoch3 = get_nakamoto_headers(&conf_1) - .into_iter() - .filter(|block| { - // skip first nakamoto block - if block.stacks_block_height == stacks_height_before { - return false; - } - let nakamoto_block_header = block.anchored_header.as_stacks_nakamoto().unwrap(); - miner_pk_1 - .verify( - nakamoto_block_header.miner_signature_hash().as_bytes(), - &nakamoto_block_header.miner_signature, - ) - .unwrap() - }) - .count(); + verify_sortition_winner(&sortdb, &miner_pkh_2); - assert_eq!(miner1_blocks_after_boot_to_epoch3, 1); + // ensure both miners are aligned + miners.wait_for_chains(60); info!("------------------------- Shutdown -------------------------"); miners.shutdown(); From 5612e6fafa2efba53d4e21ba3d5e9963f2ee5a79 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 19 Mar 2025 16:13:11 +0100 Subject: [PATCH 155/238] fixed clippy check --- clarity/src/vm/clarity.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 43520449ea7..1ef22cbc58b 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -293,6 +293,7 @@ pub trait TransactionConnection: ClarityConnection { /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, /// if abort_call_back returns true, all modifications from this transaction will be rolled back. 
/// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). + #[allow(clippy::too_many_arguments)] fn run_contract_call( &mut self, sender: &PrincipalData, From 477eae60266a8040523681603b4ef91cfd049a69 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 19 Mar 2025 08:18:59 -0700 Subject: [PATCH 156/238] fix: properly update last_burnchain_config --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8e3d6ca6e32..ddccce2deed 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -2144,15 +2144,11 @@ impl RelayerThread { } else { false }; - if burnchain_config_changed { - info!( - "Burnchain config changed; updating spend amount {}", - burnchain_config.burn_fee_cap - ); - } self.globals .set_last_miner_spend_amount(burnchain_config.burn_fee_cap); + self.globals + .set_last_burnchain_config(burnchain_config.clone()); set_mining_spend_amount( self.globals.get_miner_status(), @@ -2170,9 +2166,6 @@ impl RelayerThread { } else { false }; - if miner_config_changed { - info!("Miner config changed; forcing a re-mine attempt"); - } self.globals.set_last_miner_config(miner_config); From e085939fca318a0fa497753984d8b88edcce7e03 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 19 Mar 2025 11:48:58 -0500 Subject: [PATCH 157/238] test fixes, some cleanup --- stacks-signer/src/lib.rs | 12 ++--- stacks-signer/src/runloop.rs | 16 +++--- stacks-signer/src/v0/signer.rs | 2 +- testnet/stacks-node/src/tests/signer/mod.rs | 46 ++++++++++++----- testnet/stacks-node/src/tests/signer/v0.rs | 56 +++++++++++++-------- 5 files changed, 84 insertions(+), 48 deletions(-) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 7c646f69fcb..005cb2cb696 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -72,7 +72,7 @@ pub trait Signer: Debug + Display { stacks_client: &StacksClient, sortition_state: &mut Option, event: Option<&SignerEvent>, - res: &Sender>, + res: &Sender, current_reward_cycle: u64, ); /// Check if the signer is in the middle of processing blocks @@ -82,18 +82,18 @@ pub trait Signer: Debug + Display { } /// A wrapper around the running signer type for the signer -pub type RunningSigner = libsigner::RunningSigner, Vec, T>; +pub type RunningSigner = libsigner::RunningSigner, SignerResult, T>; /// The wrapper for the runloop signer type type RunLoopSigner = - libsigner::Signer, RunLoop, SignerEventReceiver, T>; + libsigner::Signer, SignerEventReceiver, T>; /// The spawned signer pub struct SpawnedSigner + Send, T: SignerEventTrait> { /// The underlying running signer thread handle running_signer: RunningSigner, /// The result receiver for interacting with the running signer - pub res_recv: Receiver>, + pub res_recv: Receiver, /// The spawned signer's config pub config: GlobalConfig, /// Phantom data for the signer type @@ -102,12 +102,12 @@ pub struct SpawnedSigner + Send, T: SignerEventTrait> { impl + Send, T: SignerEventTrait> SpawnedSigner { /// Stop the signer thread and return the final state - pub fn stop(self) -> Option> { + pub fn stop(self) -> Option { self.running_signer.stop() } /// Wait for the signer to terminate, and get the final state. WARNING: This will hang forever if the event receiver stop signal was never sent/no error occurred. 
- pub fn join(self) -> Option> { + pub fn join(self) -> Option { self.running_signer.join() } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 2af3ca5155f..721c4c9ea1e 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -481,7 +481,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } impl, T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop, T> for RunLoop + SignerRunLoop for RunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; @@ -494,8 +494,8 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> fn run_one_pass( &mut self, event: Option>, - res: &Sender>, - ) -> Option> { + res: &Sender, + ) -> Option { debug!( "Running one pass for the signer. state={:?}, event={event:?}", self.state @@ -536,8 +536,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> // This is the only event that we respond to from the outer signer runloop if let Some(SignerEvent::StatusCheck) = event { - info!("Signer status check requested: {:?}.", self.state); - if let Err(e) = res.send(vec![StateInfo { + let state_info = StateInfo { runloop_state: self.state, reward_cycle_info: self.current_reward_cycle_info, running_signers: self @@ -558,9 +557,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> ) }) .collect(), - } - .into()]) - { + }; + info!("Signer status check requested: {state_info:?}"); + + if let Err(e) = res.send(state_info.into()) { error!("Failed to send status check result: {e}."); } } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 69314274200..792b405b33a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -176,7 +176,7 @@ impl SignerTrait for Signer { stacks_client: &StacksClient, sortition_state: &mut Option, event: Option<&SignerEvent>, - _res: &Sender>, + _res: &Sender, current_reward_cycle: u64, ) { let event_parity = match event { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 87a54522ed4..cbcb7662e5c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -214,13 +214,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest) { - for signer_ix in 0..self.spawned_signers.len() { + for (signer_ix, signer_config) in self.signer_configs.iter().enumerate() { if exclude.contains(&signer_ix) { continue; } - let port = 3000 + signer_ix; - let endpoint = format!("http://localhost:{port}"); - let path = format!("{endpoint}/status"); + let path = format!("http://{}/status", signer_config.endpoint); debug!("Issue status request to {path}"); let client = reqwest::blocking::Client::new(); @@ -471,7 +469,14 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = signer_state.signer_state_machines.iter().map(|(rc, state)| { + (rc, state.is_some()) + }).collect(); + warn!( + "Local state machine for signer #{ix} not set for reward cycle #{current_rc} yet"; + "burn_block_height" => info_cur.burn_block_height, + "rcs_set" => ?rcs_set + ); return false; }; @@ -702,6 +707,24 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>, + new_signers_sks: Vec, + new_signer_configs: Vec, + ) -> ( + Vec>, + Vec, + Vec, + ) { + let old_signers = std::mem::replace(&mut self.spawned_signers, new_signers); + let old_signers_sks = + std::mem::replace(&mut self.signer_stacks_private_keys, new_signers_sks); + let old_signers_confs = std::mem::replace(&mut 
self.signer_configs, new_signer_configs); + (old_signers, old_signers_sks, old_signers_confs) + } + /// Get status check results (if returned) from each signer without blocking /// Returns Some() or None() for each signer, in order of `self.spawned_signers` pub fn get_states(&mut self, exclude: &HashSet) -> Vec> { @@ -711,17 +734,14 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = new_signer_configs + .iter() + .map(|conf_str| SignerConfig::load_from_str(conf_str).unwrap()) + .collect(); + let new_spawned_signers: Vec<_> = new_signer_configs .iter() - .map(|conf| { + .map(|signer_config| { info!("spawning signer"); - let signer_config = SignerConfig::load_from_str(conf).unwrap(); - SpawnedSigner::new(signer_config) + SpawnedSigner::new(signer_config.clone()) }) .collect(); @@ -4663,8 +4667,7 @@ fn signer_set_rollover() { initial_balances, |_| {}, |naka_conf| { - for toml in new_signer_configs.clone() { - let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + for signer_config in new_signer_configs.clone() { info!( "---- Adding signer endpoint to naka conf ({}) ----", signer_config.endpoint @@ -4697,9 +4700,8 @@ fn signer_set_rollover() { let short_timeout = Duration::from_secs(20); // Verify that naka_conf has our new signer's event observers - for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(toml).unwrap(); - let endpoint = format!("{}", signer_config.endpoint); + for signer_config in &new_signer_configs { + let endpoint = signer_config.endpoint.to_string(); assert!(signer_test .running_nodes .conf @@ -4843,7 +4845,20 @@ fn signer_set_rollover() { assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); + info!("---- Mining to just before the next reward cycle (block {next_cycle_height}) -----",); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(1), + new_num_signers, + ); + + let (old_spawned_signers, _, _) = signer_test.replace_signers( + new_spawned_signers, + new_signer_private_keys, + new_signer_configs, + ); + + info!("---- Mining into the next reward cycle (block {next_cycle_height}) -----",); signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, @@ -4887,7 +4902,7 @@ fn signer_set_rollover() { } signer_test.shutdown(); - for signer in new_spawned_signers { + for signer in old_spawned_signers { assert!(signer.stop().is_none()); } } @@ -12308,6 +12323,7 @@ fn signer_can_accept_rejected_block() { info!("Submitted transfer tx and waiting for block proposal"); let block = wait_for_block_proposal(30, block_height_before + 1, &miner_pk) .expect("Timed out waiting for block proposal"); + let expected_block_height = block.header.chain_length; // Wait for signer[0] to reject the block wait_for_block_rejections(30, block.header.signer_signature_hash(), 1) @@ -12337,18 +12353,18 @@ fn signer_can_accept_rejected_block() { wait_for(60, || { let blocks = test_observer::get_blocks(); - // Look for a block with height `block_height_before + 1` - if let Some(block) = blocks + // Look for a block with expected height + let Some(block) = blocks .iter() - .find(|block| block["block_height"].as_u64() == Some(block_height_before + 1)) - { - if transfers_in_block(block) == 1 { - Ok(true) // Success: found the block with exactly 1 transfer - } else { - Err("Transfer included in block".into()) // Found the block, but it has the wrong 
number of transfers - } + .find(|block| block["block_height"].as_u64() == Some(expected_block_height)) else { + return Ok(false) // Keep waiting if the block hasn't appeared yet + }; + + let transfers_included_in_block = transfers_in_block(block); + if transfers_included_in_block == 1 { + Ok(true) // Success: found the block with exactly 1 transfer } else { - Ok(false) // Keep waiting if the block hasn't appeared yet + Err(format!("Unexpected amount of transfers included in block. Found: {transfers_included_in_block}")) } }) .expect("Timed out waiting for block"); From 8c7f3fab31054d12ea513f182b87c3ae2fdabef3 Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Wed, 19 Mar 2025 09:55:18 -0700 Subject: [PATCH 158/238] Revert "remove tests related to make_signed_microblock and make_mblock_tx_chain" This reverts commit f4f234b1f95cfd63dcf3806f921d13034660379a. --- .../src/chainstate/stacks/db/transactions.rs | 430 ++++++++++++++++++ .../src/tests/neon_integrations.rs | 28 ++ 2 files changed, 458 insertions(+) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 1185124dfca..f92bde7d981 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -8112,6 +8112,436 @@ pub mod test { conn.commit_block(); } + fn make_signed_microblock( + block_privk: &StacksPrivateKey, + tx_privk: &StacksPrivateKey, + parent_block: BlockHeaderHash, + seq: u16, + ) -> StacksMicroblock { + // make transaction + let contract = r#" + (define-public (send-stx (amount uint) (recipient principal)) + (stx-transfer? amount tx-sender recipient)) + "#; + + let auth = TransactionAuth::from_p2pkh(tx_privk).unwrap(); + let addr = auth.origin().address_testnet(); + + let mut rng = rand::thread_rng(); + + let mut tx_contract_create = StacksTransaction::new( + TransactionVersion::Testnet, + auth, + TransactionPayload::new_smart_contract( + &format!("hello-world-{}", &rng.gen::()), + contract, + None, + ) + .unwrap(), + ); + + tx_contract_create.chain_id = 0x80000000; + tx_contract_create.set_tx_fee(0); + + let mut signer = StacksTransactionSigner::new(&tx_contract_create); + signer.sign_origin(tx_privk).unwrap(); + + let signed_contract_tx = signer.get_tx().unwrap(); + + // make block + let txs = vec![signed_contract_tx]; + let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + + let mut mblock = StacksMicroblock { + header: StacksMicroblockHeader { + version: 0x12, + sequence: seq, + prev_block: parent_block, + tx_merkle_root, + signature: MessageSignature([0u8; 65]), + }, + txs, + }; + mblock.sign(block_privk).unwrap(); + mblock + } + + #[test] + fn process_poison_microblock_same_block() { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let addr = auth.origin().address_testnet(); + + let balances = vec![(addr.clone(), 1000000000)]; + + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); + + let block_privk = StacksPrivateKey::from_hex( + "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", + ) + .unwrap(); + + let block_pubkh = + Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); + + let reporter_privk 
= StacksPrivateKey::from_hex( + "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", + ) + .unwrap(); + let reporter_addr = TransactionAuth::from_p2pkh(&reporter_privk) + .unwrap() + .origin() + .address_testnet(); + + for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { + let mut conn = chainstate.block_begin( + *burn_db, + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + &ConsensusHash([(dbi + 1) as u8; 20]), + &BlockHeaderHash([(dbi + 1) as u8; 32]), + ); + + StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); + + let height_opt = + StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); + assert_eq!(height_opt.unwrap(), 1); + + // make poison + let mblock_1 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); + let mblock_2 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); + assert!(mblock_1 != mblock_2); + + // report poison (in the same block) + let mut tx_poison_microblock = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&reporter_privk).unwrap(), + TransactionPayload::PoisonMicroblock( + mblock_1.header.clone(), + mblock_2.header.clone(), + ), + ); + + tx_poison_microblock.chain_id = 0x80000000; + tx_poison_microblock.set_tx_fee(0); + + let mut signer = StacksTransactionSigner::new(&tx_poison_microblock); + signer.sign_origin(&reporter_privk).unwrap(); + let signed_tx_poison_microblock = signer.get_tx().unwrap(); + + // process it! + let (fee, receipt) = StacksChainState::process_transaction( + &mut conn, + &signed_tx_poison_microblock, + false, + ASTRules::PrecheckSize, + ) + .unwrap(); + + // there must be a poison record for this microblock, from the reporter, for the microblock + // sequence. 
+ let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap(); + assert_eq!(report_opt.unwrap(), (reporter_addr, 123)); + + // result must encode poison information + let result_data = receipt.result.expect_tuple().unwrap(); + + let height = result_data + .get("block_height") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + let mblock_pubkh = result_data + .get("microblock_pubkey_hash") + .unwrap() + .to_owned() + .expect_buff(20) + .unwrap(); + let reporter = result_data + .get("reporter") + .unwrap() + .to_owned() + .expect_principal() + .unwrap(); + let seq = result_data + .get("sequence") + .unwrap() + .to_owned() + .expect_u128() + .unwrap(); + + assert_eq!(height, 1); + assert_eq!(mblock_pubkh, block_pubkh.0.to_vec()); + assert_eq!(seq, 123); + assert_eq!(reporter, reporter_addr.to_account_principal()); + + conn.commit_block(); + } + } + + #[test] + fn process_poison_microblock_invalid_transaction() { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let addr = auth.origin().address_testnet(); + + let balances = vec![(addr.clone(), 1000000000)]; + + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); + + let block_privk = StacksPrivateKey::from_hex( + "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", + ) + .unwrap(); + + let block_pubkh = + Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); + + let reporter_privk = StacksPrivateKey::from_hex( + "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", + ) + .unwrap(); + let reporter_addr = TransactionAuth::from_p2pkh(&reporter_privk) + .unwrap() + .origin() + .address_testnet(); + + for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { + let mut conn = chainstate.block_begin( + *burn_db, + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + &ConsensusHash([(dbi + 1) as u8; 20]), + &BlockHeaderHash([(dbi + 1) as u8; 32]), + ); + + StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); + + let height_opt = + StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); + assert_eq!(height_opt.unwrap(), 1); + + // make poison, but for an unknown microblock fork + let mblock_1 = make_signed_microblock(&privk, &privk, BlockHeaderHash([0x11; 32]), 123); + let mblock_2 = make_signed_microblock(&privk, &privk, BlockHeaderHash([0x11; 32]), 123); + assert!(mblock_1 != mblock_2); + + // report poison (in the same block) + let mut tx_poison_microblock = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&reporter_privk).unwrap(), + TransactionPayload::PoisonMicroblock( + mblock_1.header.clone(), + mblock_2.header.clone(), + ), + ); + + tx_poison_microblock.chain_id = 0x80000000; + tx_poison_microblock.set_tx_fee(0); + + let mut signer = StacksTransactionSigner::new(&tx_poison_microblock); + signer.sign_origin(&reporter_privk).unwrap(); + let signed_tx_poison_microblock = signer.get_tx().unwrap(); + + // should fail to process -- the transaction is invalid if it doesn't point to a known + // microblock pubkey hash. 
+ let err = StacksChainState::process_transaction( + &mut conn, + &signed_tx_poison_microblock, + false, + ASTRules::PrecheckSize, + ) + .unwrap_err(); + let Error::ClarityError(clarity_error::BadTransaction(msg)) = &err else { + panic!("Unexpected error type"); + }; + assert!(msg.find("never seen in this fork").is_some()); + conn.commit_block(); + } + } + + #[test] + fn process_poison_microblock_multiple_same_block() { + let privk = StacksPrivateKey::from_hex( + "6d430bb91222408e7706c9001cfaeb91b08c2be6d5ac95779ab52c6b431950e001", + ) + .unwrap(); + let auth = TransactionAuth::from_p2pkh(&privk).unwrap(); + let addr = auth.origin().address_testnet(); + + let balances = vec![(addr.clone(), 1000000000)]; + + let mut chainstate = + instantiate_chainstate_with_balances(false, 0x80000000, function_name!(), balances); + + let block_privk = StacksPrivateKey::from_hex( + "2f90f1b148207a110aa58d1b998510407420d7a8065d4fdfc0bbe22c5d9f1c6a01", + ) + .unwrap(); + + let block_pubkh = + Hash160::from_node_public_key(&StacksPublicKey::from_private(&block_privk)); + + let reporter_privk_1 = StacksPrivateKey::from_hex( + "e606e944014b2a9788d0e3c8defaf6bc44b1e3ab881aaba32faa6e32002b7e1f01", + ) + .unwrap(); + let reporter_privk_2 = StacksPrivateKey::from_hex( + "ca7ba28b9604418413a16d74e7dbe5c3e0012281183f590940bab0208c40faee01", + ) + .unwrap(); + let reporter_addr_1 = TransactionAuth::from_p2pkh(&reporter_privk_1) + .unwrap() + .origin() + .address_testnet(); + let reporter_addr_2 = TransactionAuth::from_p2pkh(&reporter_privk_2) + .unwrap() + .origin() + .address_testnet(); + + for (dbi, burn_db) in ALL_BURN_DBS.iter().enumerate() { + let mut conn = chainstate.block_begin( + *burn_db, + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + &ConsensusHash([(dbi + 1) as u8; 20]), + &BlockHeaderHash([(dbi + 1) as u8; 32]), + ); + + StacksChainState::insert_microblock_pubkey_hash(&mut conn, 1, &block_pubkh).unwrap(); + + let height_opt = + StacksChainState::has_microblock_pubkey_hash(&mut conn, &block_pubkh).unwrap(); + assert_eq!(height_opt.unwrap(), 1); + + // make two sets of poisons + let mblock_1_1 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); + let mblock_1_2 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x11; 32]), 123); + assert!(mblock_1_1 != mblock_1_2); + + // report poison (in the same block) + let mut tx_poison_microblock_1 = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&reporter_privk_1).unwrap(), + TransactionPayload::PoisonMicroblock( + mblock_1_1.header.clone(), + mblock_1_2.header.clone(), + ), + ); + + tx_poison_microblock_1.chain_id = 0x80000000; + tx_poison_microblock_1.set_tx_fee(0); + + let mut signer = StacksTransactionSigner::new(&tx_poison_microblock_1); + signer.sign_origin(&reporter_privk_1).unwrap(); + let signed_tx_poison_microblock_1 = signer.get_tx().unwrap(); + + // make two sets of poisons + let mblock_2_1 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x10; 32]), 122); + let mblock_2_2 = + make_signed_microblock(&block_privk, &privk, BlockHeaderHash([0x10; 32]), 122); + assert!(mblock_2_1 != mblock_2_2); + + // report poison (in the same block) + let mut tx_poison_microblock_2 = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&reporter_privk_2).unwrap(), + TransactionPayload::PoisonMicroblock( + mblock_2_1.header.clone(), + mblock_2_2.header.clone(), + ), + ); + + tx_poison_microblock_2.chain_id = 0x80000000; + 
tx_poison_microblock_2.set_tx_fee(0);
+
+            let mut signer = StacksTransactionSigner::new(&tx_poison_microblock_2);
+            signer.sign_origin(&reporter_privk_2).unwrap();
+            let signed_tx_poison_microblock_2 = signer.get_tx().unwrap();
+
+            // process it!
+            let (fee, receipt) = StacksChainState::process_transaction(
+                &mut conn,
+                &signed_tx_poison_microblock_1,
+                false,
+                ASTRules::PrecheckSize,
+            )
+            .unwrap();
+
+            // there must be a poison record for this microblock, from the reporter, for the microblock
+            // sequence.
+            let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap();
+            assert_eq!(report_opt.unwrap(), (reporter_addr_1, 123));
+
+            // process the second one!
+            let (fee, receipt) = StacksChainState::process_transaction(
+                &mut conn,
+                &signed_tx_poison_microblock_2,
+                false,
+                ASTRules::PrecheckSize,
+            )
+            .unwrap();
+
+            // there must be a poison record for this microblock, from the reporter, for the microblock
+            // sequence. Moreover, since the fork was earlier in the stream, the second reporter gets
+            // it.
+            let report_opt = StacksChainState::get_poison_microblock_report(&mut conn, 1).unwrap();
+            assert_eq!(report_opt.unwrap(), (reporter_addr_2, 122));
+
+            // result must encode poison information
+            let result_data = receipt.result.expect_tuple().unwrap();
+
+            let height = result_data
+                .get("block_height")
+                .unwrap()
+                .to_owned()
+                .expect_u128()
+                .unwrap();
+            let mblock_pubkh = result_data
+                .get("microblock_pubkey_hash")
+                .unwrap()
+                .to_owned()
+                .expect_buff(20)
+                .unwrap();
+            let reporter = result_data
+                .get("reporter")
+                .unwrap()
+                .to_owned()
+                .expect_principal()
+                .unwrap();
+            let seq = result_data
+                .get("sequence")
+                .unwrap()
+                .to_owned()
+                .expect_u128()
+                .unwrap();
+
+            assert_eq!(height, 1);
+            assert_eq!(mblock_pubkh, block_pubkh.0.to_vec());
+            assert_eq!(seq, 122);
+            assert_eq!(reporter, reporter_addr_2.to_account_principal());
+
+            conn.commit_block();
+        }
+    }
+
     #[test]
     fn test_get_tx_clarity_version_v205() {
         struct MockedBurnDB {}
diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs
index b41bbb11599..3d912fbe5b8 100644
--- a/testnet/stacks-node/src/tests/neon_integrations.rs
+++ b/testnet/stacks-node/src/tests/neon_integrations.rs
@@ -3316,6 +3316,34 @@ fn should_fix_2771() {
     channel.stop_chains_coordinator();
 }
 
+/// Returns a StacksMicroblock with the given transactions, sequence, and parent block that is
+/// signed with the given private key.
+fn make_signed_microblock(
+    block_privk: &StacksPrivateKey,
+    txs: Vec<StacksTransaction>,
+    parent_block: BlockHeaderHash,
+    seq: u16,
+) -> StacksMicroblock {
+    let mut rng = rand::thread_rng();
+
+    let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect();
+    let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs);
+    let tx_merkle_root = merkle_tree.root();
+
+    let mut mblock = StacksMicroblock {
+        header: StacksMicroblockHeader {
+            version: rng.gen(),
+            sequence: seq,
+            prev_block: parent_block,
+            tx_merkle_root,
+            signature: MessageSignature([0u8; 65]),
+        },
+        txs,
+    };
+    mblock.sign(block_privk).unwrap();
+    mblock
+}
+
 #[test]
 #[ignore]
 fn filter_low_fee_tx_integration_test() {

From 6b9771dd6ecf9311055cd90acefc135f8baf776a Mon Sep 17 00:00:00 2001
From: wileyj <2847772+wileyj@users.noreply.github.com>
Date: Wed, 19 Mar 2025 09:55:29 -0700
Subject: [PATCH 159/238] Revert "Remove fn make_mblock_tx_chain"

This reverts commit 500c434b4d9e5e9112edbcbaf3c38c74975f6c51.
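
A minimal sketch of how the two restored helpers might be driven together in an
integration test follows; the wiring (the `submit_mblock_chain` name, a funded
`privk`, `http_origin`, and `chain_id`) is assumed for illustration and does not
appear in these patches. `submit_tx` is the existing raw-transaction helper in
`neon_integrations.rs`.

    // Sketch only: `make_mblock_tx_chain` (restored in the diff below) builds 25
    // pre-signed, microblock-only contract publishes; post each one to the node.
    fn submit_mblock_chain(http_origin: &str, privk: &StacksPrivateKey, chain_id: u32) {
        // fee_plus = 0 keeps each tx at the helper's computed base fee
        let chain: Vec<Vec<u8>> = make_mblock_tx_chain(privk, 0, chain_id);
        for tx in &chain {
            // posts the raw, pre-signed transaction over the node's RPC endpoint
            submit_tx(http_origin, tx);
        }
    }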
---
 .../src/tests/neon_integrations.rs | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs
index 3d912fbe5b8..5111393dd8c 100644
--- a/testnet/stacks-node/src/tests/neon_integrations.rs
+++ b/testnet/stacks-node/src/tests/neon_integrations.rs
@@ -8614,6 +8614,38 @@ pub fn make_random_tx_chain(
     chain
 }
 
+fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec<Vec<u8>> {
+    let addr = to_addr(privk);
+    let mut chain = vec![];
+
+    for nonce in 0..25 {
+        // N.B. private keys are 32-33 bytes, so this is always safe
+        let random_iters = privk.to_bytes()[nonce as usize] as usize;
+
+        let be_bytes = [
+            privk.to_bytes()[nonce as usize],
+            privk.to_bytes()[(nonce + 1) as usize],
+        ];
+
+        let random_extra_fee = u16::from_be_bytes(be_bytes) as u64;
+
+        let mut addr_prefix = addr.to_string();
+        let _ = addr_prefix.split_off(12);
+        let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}");
+        eprintln!("Make tx {contract_name}");
+        let tx = make_contract_publish_microblock_only(
+            privk,
+            nonce,
+            1049230 + nonce + fee_plus + random_extra_fee,
+            chain_id,
+            &contract_name,
+            &make_runtime_sized_contract(1, nonce, &addr_prefix),
+        );
+        chain.push(tx);
+    }
+    chain
+}
+
 fn test_competing_miners_build_on_same_chain(
     num_miners: usize,
     conf_template: Config,

From bc079b30402bd616831528f1f7c072e108ea804e Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Wed, 19 Mar 2025 12:06:16 -0500
Subject: [PATCH 160/238] do not try to update state machine for inactive
 reward cycle

---
 stacks-signer/src/v0/signer.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 792b405b33a..8903cc3c852 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -213,8 +213,10 @@ impl SignerTrait<SignerMessage> for Signer {
             return;
         }
 
-        self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config)
-            .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e));
+        if self.reward_cycle <= current_reward_cycle {
+            self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config)
+                .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e));
+        }
 
         match event {
             SignerEvent::BlockValidationResponse(block_validate_response) => {

From 4dc33a7ece2bf38727584f44d73cafadae19de04 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Wed, 19 Mar 2025 12:49:52 -0500
Subject: [PATCH 161/238] test: fix transfer counting assertion

assertion should not include phantom txs. timing changes around
the signer status checks made this more likely.
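
The counting rule the diff below adopts can be summarized as a predicate;
`parsed` is a consensus-deserialized `StacksTransaction`, and the helper name
here is illustrative rather than taken from the patch:

    // Sketch: count a token transfer only when its origin is not the boot code
    // address, since phantom unlock transactions are synthesized as transfers
    // from the boot address.
    fn is_countable_transfer(parsed: &StacksTransaction) -> bool {
        matches!(parsed.payload, TransactionPayload::TokenTransfer(..))
            && !parsed.get_origin().address_testnet().is_boot_code_addr()
    }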
---
 .../stacks-node/src/nakamoto_node/signer_coordinator.rs | 5 ++++-
 testnet/stacks-node/src/tests/signer/v0.rs              | 7 +++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs
index b705fb4ddad..9a0a2050e92 100644
--- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs
+++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs
@@ -298,7 +298,10 @@ impl SignerCoordinator {
             );
 
             match res {
-                Err(NakamotoNodeError::SignatureTimeout) => continue,
+                Err(NakamotoNodeError::SignatureTimeout) => {
+                    info!("Block proposal signing process timed out, resending the same proposal");
+                    continue;
+                }
                 _ => return res,
             }
         }
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 599bf476d20..8cc06147d79 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -35,7 +35,7 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::LeaderBlockCommitOp;
 use stacks::chainstate::coordinator::comm::CoordinatorChannels;
 use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState};
-use stacks::chainstate::stacks::address::PoxAddress;
+use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions};
 use stacks::chainstate::stacks::boot::MINERS_NAME;
 use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo};
 use stacks::chainstate::stacks::miner::{TransactionEvent, TransactionSuccessEvent};
@@ -12105,7 +12105,10 @@ fn transfers_in_block(block: &serde_json::Value) -> usize {
         let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
         let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
         if let TransactionPayload::TokenTransfer(..) = &parsed.payload {
-            count += 1;
+            // don't count phantom unlock transactions (identified as transfers from the boot addr)
+            if !parsed.get_origin().address_testnet().is_boot_code_addr() {
+                count += 1;
+            }
         }
     }
     count

From 12765de0664b55a204242e22007bbaab015f8ac2 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Wed, 19 Mar 2025 13:21:36 -0500
Subject: [PATCH 162/238] attempt to fix test flake by reverting the reorder of
 the signer status check response

---
 stacks-signer/src/runloop.rs                | 64 +++++++++---------
 testnet/stacks-node/src/tests/signer/mod.rs | 72 +++++++++++----------
 2 files changed, 72 insertions(+), 64 deletions(-)

diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs
index 721c4c9ea1e..f8324173151 100644
--- a/stacks-signer/src/runloop.rs
+++ b/stacks-signer/src/runloop.rs
@@ -500,6 +500,38 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug>
state={:?}, event={event:?}", self.state ); + + // This is the only event that we respond to from the outer signer runloop + if let Some(SignerEvent::StatusCheck) = event { + let state_info = StateInfo { + runloop_state: self.state, + reward_cycle_info: self.current_reward_cycle_info, + running_signers: self + .stacks_signers + .values() + .map(|s| s.reward_cycle()) + .collect(), + signer_state_machines: self + .stacks_signers + .iter() + .map(|(reward_cycle, signer)| { + let ConfiguredSigner::RegisteredSigner(ref signer) = signer else { + return (*reward_cycle, None); + }; + ( + *reward_cycle, + Some(signer.get_local_state_machine().clone()), + ) + }) + .collect(), + }; + info!("Signer status check requested: {state_info:?}"); + + if let Err(e) = res.send(state_info.into()) { + error!("Failed to send status check result: {e}."); + } + } + if self.state == State::Uninitialized { if let Err(e) = self.initialize_runloop() { error!("Failed to initialize signer runloop: {e}."); @@ -514,6 +546,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> warn!("Signer may have an outdated view of the network."); } } + let current_reward_cycle = self .current_reward_cycle_info .as_ref() @@ -534,37 +567,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> ); } - // This is the only event that we respond to from the outer signer runloop - if let Some(SignerEvent::StatusCheck) = event { - let state_info = StateInfo { - runloop_state: self.state, - reward_cycle_info: self.current_reward_cycle_info, - running_signers: self - .stacks_signers - .values() - .map(|s| s.reward_cycle()) - .collect(), - signer_state_machines: self - .stacks_signers - .iter() - .map(|(reward_cycle, signer)| { - let ConfiguredSigner::RegisteredSigner(ref signer) = signer else { - return (*reward_cycle, None); - }; - ( - *reward_cycle, - Some(signer.get_local_state_machine().clone()), - ) - }) - .collect(), - }; - info!("Signer status check requested: {state_info:?}"); - - if let Err(e) = res.send(state_info.into()) { - error!("Failed to send status check result: {e}."); - } - } - if self.state == State::NoRegisteredSigners && event.is_some() { let next_reward_cycle = current_reward_cycle.saturating_add(1); info!("Signer is not registered for the current reward cycle ({current_reward_cycle}). Reward set is not yet determined or signer is not registered for the upcoming reward cycle ({next_reward_cycle})."); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index cbcb7662e5c..30140989967 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -455,40 +455,46 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest = signer_state.signer_state_machines.iter().map(|(rc, state)| { - (rc, state.is_some()) + // fetch all the state machines *twice* + // we do this because the state machines return before the signer runloop + // invokes run_one_pass(), which is necessary to handle any pending updates to + // the state machine. 
+        // we get around this by just doing this twice
+        for _i in 0..2 {
+            wait_for(120, || {
+                states = self.get_all_states();
+                Ok(states.iter().enumerate().all(|(ix, signer_state)| {
+                    let Some(Some(state_machine)) = signer_state
+                        .signer_state_machines
+                        .iter()
+                        .find_map(|(rc, state)| {
+                            if current_rc % 2 == *rc {
+                                Some(state.as_ref())
+                            } else {
+                                None
+                            }
+                        })
+                    else {
+                        let rcs_set: Vec<_> = signer_state.signer_state_machines.iter().map(|(rc, state)| {
+                            (rc, state.is_some())
+                        }).collect();
+                        warn!(
+                            "Local state machine for signer #{ix} not set for reward cycle #{current_rc} yet";
+                            "burn_block_height" => info_cur.burn_block_height,
+                            "rcs_set" => ?rcs_set
+                        );
+                        return false;
+                    };
+
+                    let LocalStateMachine::Initialized(state_machine) = state_machine else {
+                        warn!("Local state machine for signer #{ix} not initialized");
+                        return false;
+                    };
+                    state_machine.burn_block_height >= info_cur.burn_block_height
+                }))
+            })
+            .expect("Timed out while waiting to fetch local state machines from the signer set");
+        }
 
         let state_machines = states
             .into_iter()

From 6e1835cd4e1d282788b31eca6df83911f62ccbca Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Wed, 19 Mar 2025 13:54:22 -0500
Subject: [PATCH 163/238] speed up bitcoind_forking test: allow miner to commit
 immediately

---
 testnet/stacks-node/src/tests/signer/v0.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 8cc06147d79..e1d42d475ed 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -2236,6 +2236,7 @@ fn bitcoind_forking_test() {
         vec![(sender_addr, send_amt + send_fee)],
         |_| {},
         |node_config| {
+            node_config.miner.block_commit_delay = Duration::from_secs(1);
             let epochs = node_config.burnchain.epochs.as_mut().unwrap();
             epochs[StacksEpochId::Epoch30].end_height = 3_015;
             epochs[StacksEpochId::Epoch31].start_height = 3_015;

From f8535978b34093aad4449646916bcaa7837a6cf4 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Wed, 19 Mar 2025 14:22:36 -0500
Subject: [PATCH 164/238] bump registration waits

---
 testnet/stacks-node/src/tests/signer/mod.rs |  8 ++++----
 testnet/stacks-node/src/tests/signer/v0.rs  | 12 ++++++------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index 30140989967..b77e581c215 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -230,9 +230,9 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>>
             &mut self.running_nodes.btc_regtest_controller,
             &self.running_nodes.counters.blocks_processed,
         );
-        self.wait_for_registered(30);
+        self.wait_for_registered();
 
         debug!("Signers initialized");
 
         let current_burn_block_height = self
@@ -279,7 +279,7 @@ impl SignerTest<SpawnedSigner> {
             &mut self.running_nodes.btc_regtest_controller,
             &self.running_nodes.counters.blocks_processed,
         );
-        self.wait_for_registered(30);
+        self.wait_for_registered();
 
         info!("Signers initialized");
 
         self.run_until_epoch_3_boundary();
@@ -1832,7 +1832,7 @@ fn reloads_signer_set_in() {
         &mut signer_test.running_nodes.btc_regtest_controller,
         &signer_test.running_nodes.counters.blocks_processed,
     );
-    signer_test.wait_for_registered(30);
+    signer_test.wait_for_registered();
     info!("Signers initialized");
 
     signer_test.run_until_epoch_3_boundary();
@@ -8677,7 +8677,7 @@ fn incoming_signers_ignore_block_proposals() {
     info!("------------------------- Test Mine Until Middle of Prepare Phase at Block Height {middle_of_prepare_phase} -------------------------");
     signer_test.run_until_burnchain_height_nakamoto(timeout, middle_of_prepare_phase, num_signers);
 
-    signer_test.wait_for_registered_both_reward_cycles(30);
+    signer_test.wait_for_registered_both_reward_cycles();
 
     let current_burnchain_height = signer_test
         .running_nodes
@@ -8853,7 +8853,7 @@ fn outgoing_signers_ignore_block_proposals() {
     info!("------------------------- Test Mine Until Next Reward Cycle at Height {next_reward_cycle_height} -------------------------");
     signer_test.run_until_burnchain_height_nakamoto(timeout, next_reward_cycle_height, num_signers);
 
-    signer_test.wait_for_registered_both_reward_cycles(30);
+    signer_test.wait_for_registered_both_reward_cycles();
 
     let current_burnchain_height = signer_test
         .running_nodes
@@ -9211,7 +9211,7 @@ fn injected_signatures_are_ignored_across_boundaries() {
         &signer_test.running_nodes.counters.blocks_processed,
     );
 
-    signer_test.wait_for_registered_both_reward_cycles(60);
+    signer_test.wait_for_registered_both_reward_cycles();
 
     info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",);
     signer_test.run_until_burnchain_height_nakamoto(

From b2503f28941a251043ea36f084ac8ebdfb561dfc Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 19 Mar 2025 15:25:39 -0400
Subject: [PATCH 165/238] fix: error in LruCache when evicting clean value

---
 stacks-common/src/util/lru_cache.rs | 60 +++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 4 deletions(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 58c694103b4..c108a4deb16 100644
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -13,6 +13,8 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
+use std::fmt::Display;
+
 use hashbrown::HashMap;
 
 /// Node in the doubly linked list
@@ -24,6 +26,20 @@ struct Node<K, V> {
     prev: usize,
 }
 
+impl<K: Display, V: Display> Display for Node<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}={} ({}) [prev={}, next={}]",
+            self.key,
+            self.value,
+            if self.dirty { "dirty" } else { "clean" },
+            self.prev,
+            self.next
+        )
+    }
+}
+
 /// LRU cache for account nonces
 pub struct LruCache<K, V> {
     capacity: usize,
@@ -37,6 +53,22 @@ pub struct LruCache<K, V> {
     tail: usize,
 }
 
+impl<K: Display, V: Display> Display for LruCache<K, V> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        writeln!(
+            f,
+            "LruCache (capacity={}, head={}, tail={})",
+            self.capacity, self.head, self.tail
+        )?;
+        let mut curr = self.head;
+        while curr != self.capacity {
+            writeln!(f, " {}", self.order[curr])?;
+            curr = self.order[curr].next;
+        }
+        Ok(())
+    }
+}
+
 impl<K: Eq + std::hash::Hash + Clone, V: Copy> LruCache<K, V> {
     /// Create a new LRU cache with the given capacity
     pub fn new(capacity: usize) -> Self {
@@ -116,12 +148,12 @@ impl<K: Eq + std::hash::Hash + Clone, V: Copy> LruCache<K, V> {
             // Remove it from the cache
             self.cache.remove(&self.order[index].key);
 
+            // Replace the key with the new key, saving the old key
+            let replaced_key = std::mem::replace(&mut self.order[index].key, key.clone());
+
             // If it is dirty, save the key-value pair to return
             if self.order[index].dirty {
-                evicted = Some((
-                    std::mem::replace(&mut self.order[index].key, key.clone()),
-                    self.order[index].value,
-                ));
+                evicted = Some((replaced_key, self.order[index].value));
             }
 
             // Insert this new value into the cache
@@ -252,4 +284,24 @@ mod tests {
 
         assert_eq!(flushed, vec![(2, 2), (1, 3)]);
     }
+
+    #[test]
+    fn test_lru_cache_evict_clean() {
+        let mut cache = LruCache::new(2);
+
+        assert!(cache.insert_with_dirty(0, 0, false).is_none());
+        assert!(cache.insert_with_dirty(1, 1, false).is_none());
+        assert!(cache.insert_with_dirty(2, 2, true).is_none());
+        assert!(cache.insert_with_dirty(3, 3, true).is_none());
+
+        let mut flushed = Vec::new();
+        cache
+            .flush(|k, v| {
+                flushed.push((*k, v));
+                Ok::<(), ()>(())
+            })
+            .unwrap();
+
+        assert_eq!(flushed, [(3, 3), (2, 2)]);
+    }
 }

From 08b4f289c10105b352bf4226408129b40d55c56a Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Thu, 20 Mar 2025 09:38:10 -0500
Subject: [PATCH 166/238] integrate with update message

---
 libsigner/src/v0/messages.rs         | 204 ++++++++++++++++++---------
 stacks-signer/src/chainstate.rs      |   3 +
 stacks-signer/src/v0/signer_state.rs |  41 ++++++
 3 files changed, 180 insertions(+), 68 deletions(-)

diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs
index 8d88ba3c7e9..b09fab75dac 100644
--- a/libsigner/src/v0/messages.rs
+++ b/libsigner/src/v0/messages.rs
@@ -542,47 +542,121 @@ impl StacksMessageCodec for MockBlock {
     }
 }
 
 /// Message for update the Signer State infos
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 pub struct StateMachineUpdate {
-    burn_block: ConsensusHash,
-    burn_block_height: u64,
-    current_miner_pkh: Hash160,
-    parent_tenure_id: ConsensusHash,
-    parent_tenure_last_block: StacksBlockId,
-    parent_tenure_last_block_height: u64,
-    active_signer_protocol_version: u64,
-    local_supported_signer_protocol_version: u64,
+    /// The tip burn block (i.e., the latest bitcoin block) seen by this signer
+    pub burn_block: ConsensusHash,
+    /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer
+    pub burn_block_height: u64,
+    /// The signer's view of who the current miner should be (and their tenure building info)
+    pub current_miner: StateMachineUpdateMinerState,
+    /// The active signing protocol version
+    pub active_signer_protocol_version: u64,
+    /// The highest supported signing protocol by the local signer
+    pub local_supported_signer_protocol_version: u64,
+}
+
+/// The signer's view of the current miner, carried in a state machine update message
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
+pub enum StateMachineUpdateMinerState {
+    /// There is an active miner
+    ActiveMiner {
+        /// The pubkeyhash of the current miner's signing key
+        current_miner_pkh: Hash160,
+        /// The tenure ID of the current miner's active tenure
+        tenure_id: ConsensusHash,
+        /// The tenure that the current miner is building on top of
+        parent_tenure_id: ConsensusHash,
+        /// The last block of the parent tenure (which should be
+        /// the block that the next tenure starts from)
+        parent_tenure_last_block: StacksBlockId,
+        /// The height of the last block of the parent tenure (which should be
+        /// the block that the next tenure starts from)
+        parent_tenure_last_block_height: u64,
+    },
+    /// The signer doesn't believe there's any valid miner
+    NoValidMiner,
+}
+
+impl StateMachineUpdateMinerState {
+    fn get_variant_id(&self) -> u8 {
+        match self {
+            StateMachineUpdateMinerState::NoValidMiner => 0,
+            StateMachineUpdateMinerState::ActiveMiner { .. } => 1,
+        }
+    }
+}
+
+impl StacksMessageCodec for StateMachineUpdateMinerState {
+    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        self.get_variant_id().consensus_serialize(fd)?;
+        match self {
+            StateMachineUpdateMinerState::ActiveMiner {
+                current_miner_pkh,
+                tenure_id,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+            } => {
+                current_miner_pkh.consensus_serialize(fd)?;
+                tenure_id.consensus_serialize(fd)?;
+                parent_tenure_id.consensus_serialize(fd)?;
+                parent_tenure_last_block.consensus_serialize(fd)?;
+                parent_tenure_last_block_height.consensus_serialize(fd)?;
+            }
+            StateMachineUpdateMinerState::NoValidMiner => return Ok(()),
+        }
+        Ok(())
+    }
+
+    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
+        let variant_id: u8 = read_next(fd)?;
+        match variant_id {
+            0 => Ok(StateMachineUpdateMinerState::NoValidMiner),
+            1 => {
+                let current_miner_pkh = read_next(fd)?;
+                let tenure_id = read_next(fd)?;
+                let parent_tenure_id = read_next(fd)?;
+                let parent_tenure_last_block = read_next(fd)?;
+                let parent_tenure_last_block_height = read_next(fd)?;
+                Ok(StateMachineUpdateMinerState::ActiveMiner {
+                    current_miner_pkh,
+                    tenure_id,
+                    parent_tenure_id,
+                    parent_tenure_last_block,
+                    parent_tenure_last_block_height,
+                })
+            }
+            other => Err(CodecError::DeserializeError(format!(
+                "Unexpected miner state variant in StateMachineUpdate: {other}"
+            ))),
+        }
+    }
+}
 
 impl StacksMessageCodec for StateMachineUpdate {
     fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
-        write_next(fd, &self.burn_block)?;
-        write_next(fd, &self.burn_block_height)?;
-        write_next(fd, &self.current_miner_pkh)?;
-        write_next(fd, &self.parent_tenure_id)?;
-        write_next(fd, &self.parent_tenure_last_block)?;
-        write_next(fd, &self.parent_tenure_last_block_height)?;
-        write_next(fd, &self.active_signer_protocol_version)?;
-        write_next(fd, &self.local_supported_signer_protocol_version)?;
+        self.active_signer_protocol_version
+            .consensus_serialize(fd)?;
+        self.local_supported_signer_protocol_version
+            .consensus_serialize(fd)?;
+        self.burn_block.consensus_serialize(fd)?;
+        self.burn_block_height.consensus_serialize(fd)?;
+        self.current_miner.consensus_serialize(fd)?;
         Ok(())
     }
 
     fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
-        let burn_block = read_next::<ConsensusHash, _>(fd)?;
-        let burn_block_height = read_next::<u64, _>(fd)?;
-        let current_miner_pkh = read_next::<Hash160, _>(fd)?;
-        let parent_tenure_id = read_next::<ConsensusHash, _>(fd)?;
-        let parent_tenure_last_block = read_next::<StacksBlockId, _>(fd)?;
-        let parent_tenure_last_block_height = read_next::<u64, _>(fd)?;
-        let active_signer_protocol_version = read_next::<u64, _>(fd)?;
-        let local_supported_signer_protocol_version = read_next::<u64, _>(fd)?;
+        let active_signer_protocol_version = read_next(fd)?;
+        let local_supported_signer_protocol_version = read_next(fd)?;
+        let burn_block = read_next(fd)?;
+        let burn_block_height = read_next(fd)?;
+        let current_miner = read_next(fd)?;
+
         Ok(Self {
             burn_block,
             burn_block_height,
-            current_miner_pkh,
-            parent_tenure_id,
-            parent_tenure_last_block,
-            parent_tenure_last_block_height,
+            current_miner,
             active_signer_protocol_version,
             local_supported_signer_protocol_version,
         })
@@ -2051,12 +2125,15 @@ mod test {
         let signer_message = StateMachineUpdate {
             burn_block: ConsensusHash([0x55; 20]),
             burn_block_height: 100,
-            current_miner_pkh: Hash160([0xab; 20]),
-            parent_tenure_id: ConsensusHash([0x22; 20]),
-            parent_tenure_last_block: StacksBlockId([0x33; 32]),
-            parent_tenure_last_block_height: 1,
             active_signer_protocol_version: 2,
             local_supported_signer_protocol_version: 3,
+            current_miner: StateMachineUpdateMinerState::ActiveMiner {
+                current_miner_pkh: Hash160([0xab; 20]),
+                tenure_id: ConsensusHash([0x44; 20]),
+                parent_tenure_id: ConsensusHash([0x22; 20]),
+                parent_tenure_last_block: StacksBlockId([0x33; 32]),
+                parent_tenure_last_block_height: 1,
+            },
         };
 
         let mut bytes = vec![];
        signer_message.consensus_serialize(&mut bytes).unwrap();

         // check for raw content for avoiding regressions when structure changes
         let raw_signer_message: Vec<&[u8]> = vec![
+            /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2],
+            /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3],
             /* burn_block*/ &[0x55; 20],
             /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100],
+            /* current_miner_variant */ &[0x01],
             /* current_miner_pkh */ &[0xab; 20],
+            /* tenure_id*/ &[0x44; 20],
             /* parent_tenure_id*/ &[0x22; 20],
             /* parent_tenure_last_block */ &[0x33; 32],
             /* parent_tenure_last_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 1],
-            /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2],
-            /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3],
         ];
 
         assert_eq!(bytes, raw_signer_message.concat());
 
         let signer_message_deserialized =
             StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap();
 
-        assert_eq!(
-            signer_message.burn_block,
-            signer_message_deserialized.burn_block
-        );
+        assert_eq!(signer_message, signer_message_deserialized);
 
-        assert_eq!(
-            signer_message.burn_block_height,
-            signer_message_deserialized.burn_block_height
-        );
-
-        assert_eq!(
-            signer_message.current_miner_pkh,
-            signer_message_deserialized.current_miner_pkh
-        );
-
-        assert_eq!(
-            signer_message.parent_tenure_id,
-            signer_message_deserialized.parent_tenure_id
-        );
-
-        assert_eq!(
-            signer_message.parent_tenure_last_block,
-            signer_message_deserialized.parent_tenure_last_block
-        );
-
-        assert_eq!(
-            signer_message.parent_tenure_last_block_height,
-            signer_message_deserialized.parent_tenure_last_block_height
-        );
-
-        assert_eq!(
-            signer_message.active_signer_protocol_version,
-            signer_message_deserialized.active_signer_protocol_version
-        );
+        let signer_message = StateMachineUpdate {
+            burn_block: ConsensusHash([0x55; 20]),
+            burn_block_height: 100,
+            active_signer_protocol_version: 2,
+            local_supported_signer_protocol_version: 3,
+            current_miner: StateMachineUpdateMinerState::NoValidMiner,
+        };
+
+        let mut bytes = vec![];
+        signer_message.consensus_serialize(&mut bytes).unwrap();
+
+        // check for raw content for avoiding regressions when structure changes
+        let raw_signer_message: Vec<&[u8]> = vec![
+            /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2],
+            /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3],
+            /* burn_block*/ &[0x55; 20],
+            /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100],
+            /* current_miner_variant */ &[0x00],
+        ];
 
-        assert_eq!(
-            signer_message.local_supported_signer_protocol_version,
-            signer_message_deserialized.local_supported_signer_protocol_version
-        );
+        assert_eq!(bytes, raw_signer_message.concat());
+
+        let signer_message_deserialized =
+            StateMachineUpdate::consensus_deserialize(&mut &bytes[..]).unwrap();
+
+        assert_eq!(signer_message, signer_message_deserialized);
     }
 }
diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs
index ad36730d0c7..4bbb3bfa4a2 100644
--- a/stacks-signer/src/chainstate.rs
+++ b/stacks-signer/src/chainstate.rs
@@ -42,6 +42,9 @@ pub enum SignerChainstateError {
     /// The signer could not find information about the parent tenure
     #[error("No information available for parent tenure '{0}'")]
     NoParentTenureInfo(ConsensusHash),
+    /// The local state machine is not ready, so no update message can be produced
+    #[error("The local state machine is not ready, so no update message can be produced")]
+    LocalStateMachineNotReady,
 }
 
 impl From<SignerChainstateError> for RejectReason {
diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
index 680e42d73b9..0559f716f9d 100644
--- a/stacks-signer/src/v0/signer_state.rs
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -17,6 +17,9 @@ use std::time::{Duration, UNIX_EPOCH};
 
 use blockstack_lib::chainstate::burn::ConsensusHashExtensions;
 use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader};
+use libsigner::v0::messages::{
+    StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateMinerState,
+};
 use serde::{Deserialize, Serialize};
 use slog::{slog_info, slog_warn};
 use stacks_common::bitvec::BitVec;
@@ -31,6 +34,9 @@ use crate::chainstate::{
 use crate::client::{ClientError, CurrentAndLastSortition, StacksClient};
 use crate::signerdb::SignerDb;
 
+/// This is the latest supported protocol version for this signer binary
+pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 1;
+
 /// A signer state machine view. This struct can
 /// be used to encode the local signer's view or
 /// the global view.
@@ -93,6 +99,41 @@ pub enum StateMachineUpdate {
     BurnBlock(u64),
 }
 
+impl TryInto<StateMachineUpdateMessage> for &LocalStateMachine {
+    type Error = SignerChainstateError;
+
+    fn try_into(self) -> Result<StateMachineUpdateMessage, Self::Error> {
+        let LocalStateMachine::Initialized(state_machine) = self else {
+            return Err(SignerChainstateError::LocalStateMachineNotReady);
+        };
+
+        let current_miner = match state_machine.current_miner {
+            MinerState::ActiveMiner {
+                current_miner_pkh,
+                tenure_id,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+            } => StateMachineUpdateMinerState::ActiveMiner {
+                current_miner_pkh,
+                tenure_id,
+                parent_tenure_id,
+                parent_tenure_last_block,
+                parent_tenure_last_block_height,
+            },
+            MinerState::NoValidMiner => StateMachineUpdateMinerState::NoValidMiner,
+        };
+
+        Ok(StateMachineUpdateMessage {
+            burn_block: state_machine.burn_block,
+            burn_block_height: state_machine.burn_block_height,
+            current_miner,
+            active_signer_protocol_version: state_machine.active_signer_protocol_version,
+            local_supported_signer_protocol_version: SUPPORTED_SIGNER_PROTOCOL_VERSION,
+        })
+    }
+}
+
 impl LocalStateMachine {
     /// Initialize a local state machine by querying the local stacks-node
     /// and signerdb for the current sortition information

From d9ff63c7daf00d292c5bd0e9367a1fa98f495ef6 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 20 Mar 2025 10:55:28 -0400
Subject: [PATCH 167/238] fix: reset nonce cache when assembled block is not
 accepted

---
 stackslib/src/chainstate/stacks/miner.rs       |  1 -
 .../stacks-node/src/nakamoto_node/miner.rs     | 19 +++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs
index 6cffb4daa5d..2e1b5030ef3 100644
--- a/stackslib/src/chainstate/stacks/miner.rs
+++ b/stackslib/src/chainstate/stacks/miner.rs
@@ -2287,7 +2287,6 @@ impl StacksBlockBuilder {
             }
         }
 
-        // TODO: Should we fill in missing nonces here too?
         mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?;
 
         let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT;
diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 81778540ed3..5ba77f584fe 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -189,6 +189,8 @@ pub struct BlockMinerThread {
     burnchain: Burnchain,
     /// Last block mined
     last_block_mined: Option<NakamotoBlock>,
+    /// Hash of the last block assembled
+    last_block_assembled: Option<StacksBlockId>,
     /// Number of blocks mined since a tenure change/extend was attempted
     mined_blocks: u64,
     /// Cost consumed by the current tenure
@@ -243,6 +245,7 @@ impl BlockMinerThread {
             keychain: rt.keychain.clone(),
             burnchain: rt.burnchain.clone(),
             last_block_mined: None,
+            last_block_assembled: None,
             mined_blocks: 0,
             registered_key,
             burn_election_block,
@@ -512,6 +515,19 @@ impl BlockMinerThread {
             return Err(NakamotoNodeError::StacksTipChanged);
         }
 
+        if self.last_block_assembled.is_none()
+            || self.last_block_assembled
+                != self.last_block_mined.as_ref().map(|block| block.block_id())
+        {
+            // Reset the nonce cache, since it is only valid if we assembled
+            // the last block successfully.
+            let mut mem_pool = self
+                .config
+                .connect_mempool_db()
+                .expect("Database failure opening mempool");
+            mem_pool.reset_nonce_cache()?;
+        }
+
         let new_block = loop {
             // If we're mock mining, we may not have processed the block that the
            // actual tenure winner committed to yet. So, before attempting to
@@ -540,6 +556,8 @@ impl BlockMinerThread {
 
             match self.mine_block(coordinator) {
                 Ok(x) => {
+                    self.last_block_assembled = Some(x.block_id());
+
                     if !self.validate_timestamp(&x)? {
                         info!("Block mined too quickly. Will try again.";
                             "block_timestamp" => x.header.timestamp,
@@ -557,6 +575,7 @@ impl BlockMinerThread {
                     }
 
                     info!("Miner interrupted while mining, will try again");
+                    self.last_block_assembled = None;
                     // sleep, and try again. if the miner was interrupted because the burnchain
                     // view changed, the next `mine_block()` invocation will error
                     thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS));

From 883fbac2058d2f2e4f7c8372a6bca29fd4e979c1 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Thu, 20 Mar 2025 16:26:59 -0400
Subject: [PATCH 168/238] fix: nonce cache reset logic

---
 .../stacks-node/src/nakamoto_node/miner.rs | 44 ++++++++-----------
 1 file changed, 18 insertions(+), 26 deletions(-)

diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs
index 5ba77f584fe..d0cb13521c7 100644
--- a/testnet/stacks-node/src/nakamoto_node/miner.rs
+++ b/testnet/stacks-node/src/nakamoto_node/miner.rs
@@ -189,8 +189,6 @@ pub struct BlockMinerThread {
     burnchain: Burnchain,
     /// Last block mined
     last_block_mined: Option<NakamotoBlock>,
-    /// Hash of the last block assembled
-    last_block_assembled: Option<StacksBlockId>,
     /// Number of blocks mined since a tenure change/extend was attempted
     mined_blocks: u64,
     /// Cost consumed by the current tenure
@@ -226,6 +224,8 @@ pub struct BlockMinerThread {
     burn_tip_at_start: ConsensusHash,
     /// flag to indicate an abort driven from the relayer
     abort_flag: Arc<AtomicBool>,
+    /// Should the nonce cache be reset before mining the next block?
+    reset_nonce_cache: bool,
 }
 
 impl BlockMinerThread {
@@ -245,7 +245,6 @@ impl BlockMinerThread {
             keychain: rt.keychain.clone(),
             burnchain: rt.burnchain.clone(),
             last_block_mined: None,
-            last_block_assembled: None,
             mined_blocks: 0,
             registered_key,
             burn_election_block,
@@ -260,6 +259,7 @@ impl BlockMinerThread {
             abort_flag: Arc::new(AtomicBool::new(false)),
             tenure_cost: ExecutionCost::ZERO,
             tenure_budget: ExecutionCost::ZERO,
+            reset_nonce_cache: true,
         }
     }
 
@@ -442,13 +442,6 @@ impl BlockMinerThread {
             ))
         })?;
 
-        // Reset the nonce cache, since it is only updated while mining
-        let mut mem_pool = self
-            .config
-            .connect_mempool_db()
-            .expect("Database failure opening mempool");
-        mem_pool.reset_nonce_cache()?;
-
        // now, actually run this tenure
         loop {
             if let Err(e) = self.miner_main_loop(
@@ -512,20 +508,15 @@ impl BlockMinerThread {
             return Err(NakamotoNodeError::StacksTipChanged);
         }
 
-        if self.last_block_assembled.is_none()
-            || self.last_block_assembled
-                != self.last_block_mined.as_ref().map(|block| block.block_id())
-        {
-            // Reset the nonce cache, since it is only valid if we assembled
-            // the last block successfully.
-            let mut mem_pool = self
-                .config
-                .connect_mempool_db()
-                .expect("Database failure opening mempool");
-            mem_pool.reset_nonce_cache()?;
-        }
-
         let new_block = loop {
+            if self.reset_nonce_cache {
+                let mut mem_pool = self
+                    .config
+                    .connect_mempool_db()
+                    .expect("Database failure opening mempool");
+                mem_pool.reset_nonce_cache()?;
+            }
+
             // If we're mock mining, we may not have processed the block that the
             // actual tenure winner committed to yet. So, before attempting to
             // mock mine, check if the parent is processed.
@@ -556,8 +544,6 @@ impl BlockMinerThread { match self.mine_block(coordinator) { Ok(x) => { - self.last_block_assembled = Some(x.block_id()); - if !self.validate_timestamp(&x)? { info!("Block mined too quickly. Will try again."; "block_timestamp" => x.header.timestamp, @@ -575,7 +561,7 @@ impl BlockMinerThread { } info!("Miner interrupted while mining, will try again"); - self.last_block_assembled = None; + // sleep, and try again. if the miner was interrupted because the burnchain // view changed, the next `mine_block()` invocation will error thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -583,6 +569,7 @@ impl BlockMinerThread { } Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { debug!("Miner did not find any transactions to mine"); + self.reset_nonce_cache = false; break None; } Err(e) => { @@ -1279,6 +1266,11 @@ impl BlockMinerThread { return Err(ChainstateError::MinerAborted.into()); } + // If we attempt to build a block, we should reset the nonce cache. + // In the special case where no transactions are found, this flag will + // be reset to false. + self.reset_nonce_cache = true; + // build the block itself let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, From e4271b338ee02c03466266b5cb0043caebc22b29 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 20 Mar 2025 16:27:30 -0400 Subject: [PATCH 169/238] test: fix large mempool test logic --- .../src/tests/nakamoto_integrations.rs | 24 +++++++++++-------- testnet/stacks-node/src/tests/signer/v0.rs | 24 +++++++++++-------- 2 files changed, 28 insertions(+), 20 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 30154d4839e..7953df50035 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -11278,14 +11278,17 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) // These 10 accounts will send to 25 accounts each, then those 260 accounts // will send to 25 accounts each, for a total of 6760 accounts. // At the end of the funding round, we want to have 6760 accounts with - // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends. - // With a fee of 180 uSTX per send, we need each account to end up with - // 181 * 25 * 2 = 9_050 uSTX. - // The 260 accounts in the middle will need to have - // (9050 + 180) * 26 = 239_980 uSTX. - // The 10 initial accounts will need to have - // (239980 + 180) * 26 = 6_244_160 uSTX. - let initial_balance = 6_244_160; + // enough balance to send 1 uSTX 25 times. + // With a fee of 180 to 2000 uSTX per send, we need each account to have + // 2001 * 25 = 50_025 uSTX. + // The 260 accounts in the middle will need to have enough to send that + // amount to 25 other accounts, plus the fee, and then enough to send the + // transfers themselves as well: + // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX. + // The 10 initial accounts will need to have enough to send that amount to + // 25 other accounts, plus enough to send the transfers themselves as well: + // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX. 
+ let initial_balance = 33_938_400; for addr in initial_sender_addrs.iter() { naka_conf.add_initial_balance(PrincipalData::from(*addr).to_string(), initial_balance); } @@ -11362,7 +11365,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) transfer_fee, naka_conf.burnchain.chain_id, &recipient_addr.into(), - 239_980, + 1_305_150, ); insert_tx_in_mempool( &db_tx, @@ -11418,7 +11421,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) transfer_fee, naka_conf.burnchain.chain_id, &recipient_addr.into(), - 9_050, + 50_025, ); insert_tx_in_mempool( &db_tx, @@ -11472,6 +11475,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) for (sender_sk, nonce) in senders.iter_mut() { let sender_addr = tests::to_addr(sender_sk); let fee = set_fee(); + assert!(fee >= 180 && fee <= 2000); let transfer_tx = make_stacks_transfer( sender_sk, *nonce, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index da08e40884a..6fee616edd5 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12475,14 +12475,17 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64) // These 10 accounts will send to 25 accounts each, then those 260 accounts // will send to 25 accounts each, for a total of 6760 accounts. // At the end of the funding round, we want to have 6760 accounts with - // enough balance to send 1 uSTX 25 times for each of 2 rounds of sends. - // With a fee of 180 uSTX per send, we need each account to end up with - // 181 * 25 * 2 = 9_050 uSTX. - // The 260 accounts in the middle will need to have - // (9050 + 180) * 26 = 239_980 uSTX. - // The 10 initial accounts will need to have - // (239980 + 180) * 26 = 6_244_160 uSTX. - let initial_balance = 6_244_160; + // enough balance to send 1 uSTX 25 times. + // With a fee of 180 to 2000 uSTX per send, we need each account to have + // 2001 * 25 = 50_025 uSTX. + // The 260 accounts in the middle will need to have enough to send that + // amount to 25 other accounts, plus the fee, and then enough to send the + // transfers themselves as well: + // (50025 + 180) * 25 + 50025 = 1_305_150 uSTX. + // The 10 initial accounts will need to have enough to send that amount to + // 25 other accounts, plus enough to send the transfers themselves as well: + // (1305150 + 180) * 25 + 1305150 = 33_938_400 uSTX. 
+    let initial_balance = 33_938_400;
     let initial_balances = initial_sender_addrs
         .iter()
         .map(|addr| (addr.clone(), initial_balance))
         .collect();
@@ -12538,7 +12541,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64)
                     transfer_fee,
                     chain_id,
                     &recipient_addr.into(),
-                    239_980,
+                    1_305_150,
                 );
                 insert_tx_in_mempool(
                     &db_tx,
@@ -12594,7 +12597,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64)
                     transfer_fee,
                     chain_id,
                     &recipient_addr.into(),
-                    9_050,
+                    50_025,
                 );
                 insert_tx_in_mempool(
                     &db_tx,
@@ -12648,6 +12651,7 @@ fn large_mempool_base(strategy: MemPoolWalkStrategy, set_fee: impl Fn() -> u64)
         for (sender_sk, nonce) in senders.iter_mut() {
             let sender_addr = tests::to_addr(sender_sk);
             let fee = set_fee();
+            assert!(fee >= 180 && fee <= 2000);
             let transfer_tx = make_stacks_transfer(sender_sk, *nonce, fee, chain_id, &recipient, 1);
             insert_tx_in_mempool(
                 &db_tx,

From c110f0df347a54561313d812c4d6a39594dc397e Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Thu, 20 Mar 2025 16:02:27 -0500
Subject: [PATCH 170/238] use prefix_hex for ser/deser

* simplify multiple_miners_empty_sortition and single_miner_empty_sortition
  tests

---
 libsigner/src/events.rs                     |  76 ++---
 stacks-signer/src/v0/signer.rs              |   4 +
 stackslib/src/net/api/mod.rs                |   3 +-
 testnet/stacks-node/src/tests/signer/mod.rs |  61 +++-
 testnet/stacks-node/src/tests/signer/v0.rs  | 330 ++++++++------------
 5 files changed, 217 insertions(+), 257 deletions(-)

diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs
index b0dc1b3bc83..ad0583a0576 100644
--- a/libsigner/src/events.rs
+++ b/libsigner/src/events.rs
@@ -29,6 +29,7 @@ use blockstack_lib::chainstate::stacks::StacksTransaction;
 use blockstack_lib::net::api::postblock_proposal::{
     BlockValidateReject, BlockValidateResponse, ValidateRejectCode,
 };
+use blockstack_lib::net::api::{prefix_hex, prefix_opt_hex};
 use blockstack_lib::net::stackerdb::MINER_SLOT_COUNT;
 use blockstack_lib::util_lib::boot::boot_code_id;
 use blockstack_lib::version_string;
@@ -215,8 +216,9 @@ pub enum SignerEvent<T: SignerEventTrait> {
         /// The consensus hash of the block (either the tenure it was produced during for Stacks 3.0
         /// or the burn block that won the sortition in Stacks 2.0)
         consensus_hash: ConsensusHash,
-        /// The signer sighash for the newly processed stacks block
-        signer_sighash: Sha512Trunc256Sum,
+        /// The signer sighash for the newly processed stacks block. If the newly processed block is a 2.0
+        /// block, there is *no* signer sighash
+        signer_sighash: Option<Sha512Trunc256Sum>,
         /// The block height for the newly processed stacks block
         block_height: u64,
     },
@@ -559,50 +561,39 @@ impl<T: SignerEventTrait> TryFrom<BurnBlockEvent> for SignerEvent<T> {
 
 #[derive(Debug, Deserialize)]
 struct BurnBlockEvent {
-    burn_block_hash: String,
+    #[serde(with = "prefix_hex")]
+    burn_block_hash: BurnchainHeaderHash,
     burn_block_height: u64,
     reward_recipients: Vec<serde_json::Value>,
     reward_slot_holders: Vec<String>,
     burn_amount: u64,
-    consensus_hash: String,
+    #[serde(with = "prefix_hex")]
+    consensus_hash: ConsensusHash,
 }
 
 impl<T: SignerEventTrait> TryFrom<BurnBlockEvent> for SignerEvent<T> {
     type Error = EventError;
 
     fn try_from(burn_block_event: BurnBlockEvent) -> Result<Self, Self::Error> {
-        let burn_header_hash = burn_block_event
-            .burn_block_hash
-            .get(2..)
-            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
-            .and_then(|hex| {
-                BurnchainHeaderHash::from_hex(hex)
-                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
-            })?;
-
-        let consensus_hash = burn_block_event
-            .consensus_hash
-            .get(2..)
-            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
-            .and_then(|hex| {
-                ConsensusHash::from_hex(hex)
-                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
-            })?;
-
         Ok(SignerEvent::NewBurnBlock {
             burn_height: burn_block_event.burn_block_height,
             received_time: SystemTime::now(),
-            burn_header_hash,
-            consensus_hash,
+            burn_header_hash: burn_block_event.burn_block_hash,
+            consensus_hash: burn_block_event.consensus_hash,
         })
     }
 }
 
 #[derive(Debug, Deserialize)]
 struct BlockEvent {
-    index_block_hash: String,
-    signer_signature_hash: String,
-    consensus_hash: String,
+    #[serde(with = "prefix_hex")]
+    index_block_hash: StacksBlockId,
+    #[serde(with = "prefix_opt_hex")]
+    signer_signature_hash: Option<Sha512Trunc256Sum>,
+    #[serde(with = "prefix_hex")]
+    consensus_hash: ConsensusHash,
+    #[serde(with = "prefix_hex")]
+    block_hash: BlockHeaderHash,
     block_height: u64,
 }
 
@@ -610,35 +601,10 @@ impl<T: SignerEventTrait> TryFrom<BlockEvent> for SignerEvent<T> {
     type Error = EventError;
 
     fn try_from(block_event: BlockEvent) -> Result<Self, Self::Error> {
-        let signer_sighash = block_event
-            .signer_signature_hash
-            .get(2..)
-            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
-            .and_then(|hex| {
-                Sha512Trunc256Sum::from_hex(hex)
-                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
-            })?;
-        let consensus_hash = block_event
-            .consensus_hash
-            .get(2..)
-            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
-            .and_then(|hex| {
-                ConsensusHash::from_hex(hex)
-                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
-            })?;
-        let block_id = block_event
-            .index_block_hash
-            .get(2..)
-            .ok_or_else(|| EventError::Deserialize("Hex string should be 0x prefixed".into()))
-            .and_then(|hex| {
-                StacksBlockId::from_hex(hex)
-                    .map_err(|e| EventError::Deserialize(format!("Invalid hex string: {e}")))
-            })?;
-
         Ok(SignerEvent::NewBlock {
-            block_id,
-            signer_sighash,
-            consensus_hash,
+            signer_sighash: block_event.signer_signature_hash,
+            block_id: block_event.index_block_hash,
+            consensus_hash: block_event.consensus_hash,
             block_height: block_event.block_height,
         })
     }
diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 8903cc3c852..258d88ec8b7 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -341,6 +341,10 @@ impl SignerTrait<SignerMessage> for Signer {
                 consensus_hash,
                 signer_sighash,
             } => {
+                let Some(signer_sighash) = signer_sighash else {
+                    debug!("{self}: received a new block event for a pre-nakamoto block, no processing necessary");
+                    return;
+                };
                 debug!(
                     "{self}: Received a new block event.";
                     "block_id" => %block_id,
diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs
index cff12f2242a..24937bbd0a9 100644
--- a/stackslib/src/net/api/mod.rs
+++ b/stackslib/src/net/api/mod.rs
@@ -20,7 +20,7 @@ use stacks_common::codec::read_next;
 use stacks_common::types::chainstate::{
     BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, SortitionId, StacksBlockId,
 };
-use stacks_common::util::hash::Hash160;
+use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum};
 use stacks_common::util::HexError;
 
 use crate::burnchains::Txid;
@@ -244,3 +244,4 @@ impl_hex_deser!(VRFSeed);
 impl_hex_deser!(ConsensusHash);
 impl_hex_deser!(BlockHeaderHash);
 impl_hex_deser!(Hash160);
+impl_hex_deser!(Sha512Trunc256Sum);
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index b77e581c215..a27b2e11281 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -50,11 +50,11 @@ use stacks_signer::runloop::{SignerResult, State, StateInfo};
 use stacks_signer::v0::signer_state::{LocalStateMachine, MinerState};
 use stacks_signer::{Signer, SpawnedSigner};

-use super::make_stacks_transfer;
 use super::nakamoto_integrations::{
     check_nakamoto_empty_block_heuristics, next_block_and, wait_for,
 };
 use super::neon_integrations::{get_account, get_sortition_info_ch, submit_tx_fallible};
+use super::{make_contract_call, make_contract_publish, make_stacks_transfer};
 use crate::neon::Counters;
 use crate::run_loop::boot_nakamoto;
 use crate::tests::bitcoin_regtest::BitcoinCoreController;
@@ -448,6 +448,65 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>>
+    /// Submit a burn block dependent contract for publishing
+    /// and wait until it is included in a block
+    pub fn submit_burn_block_contract_and_wait(
+        &mut self,
+        sender_sk: &StacksPrivateKey,
+    ) -> Result<String, String> {
+        let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind);
+        let sender_addr = to_addr(&sender_sk);
+        let sender_nonce = get_account(&http_origin, &sender_addr).nonce;
+        let burn_height_contract = "
+            (define-data-var local-burn-block-ht uint u0)
+            (define-public (run-update)
+                (ok (var-set local-burn-block-ht burn-block-height)))
+        ";
+        let contract_tx = make_contract_publish(
+            &sender_sk,
+            0,
+            1000,
+            self.running_nodes.conf.burnchain.chain_id,
+            "burn-height-local",
+            burn_height_contract,
+        );
+        let txid = submit_tx_fallible(&http_origin, &contract_tx)?;
+
+        wait_for(120, || {
+            let next_nonce = get_account(&http_origin, &sender_addr).nonce;
+            Ok(next_nonce > sender_nonce)
+        })
+        .map(|()| txid)
+    }
+
+    /// Submit a burn block dependent contract-call
+    /// and wait until it is included in a block
+    pub fn submit_burn_block_call_and_wait(
+        &mut self,
+        sender_sk: &StacksPrivateKey,
+    ) -> Result<String, String> {
+        let http_origin = format!("http://{}", &self.running_nodes.conf.node.rpc_bind);
+        let sender_addr = to_addr(&sender_sk);
+        let sender_nonce = get_account(&http_origin, &sender_addr).nonce;
+        let contract_call_tx = make_contract_call(
+            &sender_sk,
+            sender_nonce,
+            1000,
+            self.running_nodes.conf.burnchain.chain_id,
+            &sender_addr,
+            "burn-height-local",
+            "run-update",
+            &[],
+        );
+        let txid = submit_tx_fallible(&http_origin, &contract_call_tx)?;
+
+        wait_for(120, || {
+            let next_nonce = get_account(&http_origin, &sender_addr).nonce;
+            Ok(next_nonce > sender_nonce)
+        })
+        .map(|()| txid)
+    }
+
     /// Get the local state machines and most recent peer info from the stacks-node,
     /// waiting until all of the signers have updated their state machines to
     /// reflect the most recent burn block.
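The two helpers above are meant to be used together: publish `burn-height-local` once per test, then call `run-update` whenever the test needs a block whose execution depends on `burn-block-height`. A minimal usage sketch, assuming a booted `SignerTest` in `signer_test` and a funded `sender_sk` (illustrative only, not part of the diff):

    // Publish the burn-height contract and wait until it is mined.
    signer_test
        .submit_burn_block_contract_and_wait(&sender_sk)
        .expect("Timed out waiting for contract publish");
    // Each call executes run-update, which reads burn-block-height on-chain,
    // so the miner must build the block against a consistent burn view.
    signer_test
        .submit_burn_block_call_and_wait(&sender_sk)
        .expect("Timed out waiting for contract-call");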
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 153dbb03f9a..7097f0d3a74 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -94,9 +94,8 @@ use crate::tests::nakamoto_integrations::{
     POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT,
 };
 use crate::tests::neon_integrations::{
-    get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info,
-    get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx,
-    submit_tx_fallible, test_observer,
+    get_account, get_chain_info, get_chain_info_opt, get_sortition_info, get_sortition_info_ch,
+    next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer,
 };
 use crate::tests::{
     self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer,
@@ -460,7 +459,6 @@ impl SignerTest<SpawnedSigner> {
 pub struct MultipleMinerTest {
     signer_test: SignerTest<SpawnedSigner>,
     sender_sk: Secp256k1PrivateKey,
-    sender_nonce: u64,
     send_amt: u64,
     send_fee: u64,
     conf_node_2: NeonConfig,
@@ -604,7 +602,6 @@ impl MultipleMinerTest {
         MultipleMinerTest {
             signer_test,
             sender_sk,
-            sender_nonce: 0,
             send_amt,
             send_fee,
             conf_node_2,
@@ -736,33 +733,28 @@ impl MultipleMinerTest {
         )
     }

-    /// Sends a transfer tx to the stacks node and returns the txid
-    pub fn send_transfer_tx(&mut self) -> String {
-        let http_origin = format!(
+    /// Sends a transfer tx to the stacks node and returns the txid and nonce used
+    pub fn send_transfer_tx(&mut self) -> (String, u64) {
+        self.signer_test
+            .submit_transfer_tx(&self.sender_sk, self.send_fee, self.send_amt)
+            .unwrap()
+    }
+
+    fn node_http(&self) -> String {
+        format!(
             "http://{}",
             &self.signer_test.running_nodes.conf.node.rpc_bind
-        );
-        let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-        // submit a tx so that the miner will mine an extra block
-        let transfer_tx = make_stacks_transfer(
-            &self.sender_sk,
-            self.sender_nonce,
-            self.send_fee,
-            self.signer_test.running_nodes.conf.burnchain.chain_id,
-            &recipient,
-            self.send_amt,
-        );
-        self.sender_nonce += 1;
-        submit_tx(&http_origin, &transfer_tx)
+        )
     }

     /// Sends a transfer tx to the stacks node and waits for the stacks node to mine it
     /// Returns the txid of the transfer tx.
     pub fn send_and_mine_transfer_tx(&mut self, timeout_secs: u64) -> Result<String, String> {
-        let stacks_height_before = self.get_peer_stacks_tip_height();
-        let txid = self.send_transfer_tx();
+        let (txid, nonce) = self.send_transfer_tx();
+        let http_origin = self.node_http();
+        let sender_addr = tests::to_addr(&self.sender_sk);
         wait_for(timeout_secs, || {
-            Ok(self.get_peer_stacks_tip_height() > stacks_height_before)
+            Ok(get_account(&http_origin, &sender_addr).nonce > nonce)
         })?;
         Ok(txid)
     }
@@ -824,6 +816,20 @@ impl MultipleMinerTest {
         self.rl2_counters.naka_skip_commit_op.set(true);
     }

+    /// Pause miner 1's commits
+    pub fn pause_commits_miner_1(&mut self) {
+        self.signer_test
+            .running_nodes
+            .counters
+            .naka_skip_commit_op
+            .set(true);
+    }
+
+    /// Pause miner 2's commits
+    pub fn pause_commits_miner_2(&mut self) {
+        self.rl2_counters.naka_skip_commit_op.set(true);
+    }
+
     /// Ensures that miner 1 submits a commit pointing to the current view reported by the stacks node as expected
     pub fn submit_commit_miner_1(&mut self, sortdb: &SortitionDB) {
         if !self
@@ -9772,11 +9778,11 @@ fn fast_sortition() {
 #[test]
 #[ignore]
 /// This test spins up two nakamoto nodes, both configured to mine.
-/// After Nakamoto blocks are mined, it waits for a normal tenure, then issues
+/// After Nakamoto blocks are mined, it issues a normal tenure, then issues
 /// two bitcoin blocks in quick succession -- the first will contain block commits,
 /// and the second "flash block" will contain no block commits.
-/// The test checks if the winner of the first block is different than the previous tenure.
-/// If so, it performs the actual test: asserting that the miner wakes up and produces valid blocks.
+/// The test asserts that the winner of the first block is different than the previous tenure,
+/// and performs the actual test: asserting that the miner wakes up and produces valid blocks.
 /// This test uses the burn-block-height to ensure consistent calculation of the burn view between
 /// the miner thread and the block processor
 fn multiple_miners_empty_sortition() {
@@ -9785,164 +9791,120 @@
     }

     let num_signers = 5;
-    let mut miners = MultipleMinerTest::new(num_signers, 60);
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        60,
+        |signer_config| {
+            // We don't want the miner of the "inactive" sortition before the flash block
+            // to get timed out.
+            signer_config.block_proposal_timeout = Duration::from_secs(600);
+        },
+        |_| {},
+        |_| {},
+    );

-    let (conf_1, conf_2) = miners.get_node_configs();
+    let (conf_1, _conf_2) = miners.get_node_configs();

-    let rl1_commits = miners
-        .signer_test
-        .running_nodes
-        .counters
-        .naka_submitted_commits
-        .clone();
     let rl1_counters = miners.signer_test.running_nodes.counters.clone();
-    let rl2_commits = miners.rl2_counters.naka_submitted_commits.clone();
-    let rl2_counters = miners.rl2_counters.clone();
+    let sortdb = SortitionDB::open(
+        &conf_1.get_burn_db_file_path(),
+        false,
+        conf_1.get_burnchain().pox_constants,
+    )
+    .unwrap();

-    let sender_addr = tests::to_addr(&miners.sender_sk);
+    miners.pause_commits_miner_2();
+    let (mining_pkh_1, mining_pkh_2) = miners.get_miner_public_key_hashes();

     miners.boot_to_epoch_3();

-    let burn_height_contract = "
-        (define-data-var local-burn-block-ht uint u0)
-        (define-public (run-update)
-            (ok (var-set local-burn-block-ht burn-block-height)))
-    ";
-
-    let contract_tx = make_contract_publish(
-        &miners.sender_sk,
-        miners.sender_nonce,
-        1000,
-        conf_1.burnchain.chain_id,
-        "burn-height-local",
-        burn_height_contract,
-    );
-    submit_tx(&conf_1.node.data_url, &contract_tx);
-    miners.sender_nonce += 1;
+    let info = get_chain_info(&conf_1);

-    let last_sender_nonce = loop {
-        // Mine 1 nakamoto tenures
-        info!("Mining tenure...");
-
-        miners.signer_test.mine_block_wait_on_processing(
-            &[&conf_1, &conf_2],
-            &[&rl1_counters, &rl2_counters],
-            Duration::from_secs(30),
-        );
-
-        // mine the interim blocks
-        for _ in 0..2 {
-            let sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce;
-            // check if the burn contract is already produced, if not wait for it to be included in
-            // an interim block
-            if sender_nonce >= 1 {
-                let contract_call_tx = make_contract_call(
-                    &miners.sender_sk,
-                    sender_nonce,
-                    miners.send_fee,
-                    conf_1.burnchain.chain_id,
-                    &sender_addr,
-                    "burn-height-local",
-                    "run-update",
-                    &[],
-                );
-                submit_tx(&conf_1.node.data_url, &contract_call_tx);
-            }
+    miners
+        .signer_test
+        .submit_burn_block_contract_and_wait(&miners.sender_sk)
+        .expect("Timed out waiting for contract publish");

-            // make sure the sender's tx gets included (whether it was the contract publish or call)
-            wait_for(60, || {
-                let next_sender_nonce =
get_account(&conf_1.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); - } + wait_for(60, || { + Ok( + rl1_counters.naka_submitted_commit_last_burn_height.get() >= info.burn_block_height + && rl1_counters.naka_submitted_commit_last_stacks_tip.get() + >= info.stacks_tip_height, + ) + }) + .expect("Timed out waiting for commits from Miner 1 for Tenure 1 of the test"); - let last_active_sortition = get_sortition_info(&conf_1); - assert!(last_active_sortition.was_sortition); + for _ in 0..2 { + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); + } - // check if we're about to cross a reward cycle boundary -- if so, we can't - // perform this test, because we can't tenure extend across the boundary - let pox_info = get_pox_info(&conf_1.node.data_url).unwrap(); - let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase; - if blocks_until_next_cycle == 1 { - info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!"); - continue; - } + let tenure_0_stacks_height = get_chain_info(&conf_1).stacks_tip_height; + miners.pause_commits_miner_1(); + miners.signer_test.mine_bitcoin_block(); + miners.signer_test.check_signer_states_normal(); + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert_eq!(tip_sn.miner_pk_hash, Some(mining_pkh_1)); - // lets mine a btc flash block - let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); - let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); - let info_before = get_chain_info(&conf_1); + wait_for(60, || { + Ok(get_chain_info(&conf_1).stacks_tip_height > tenure_0_stacks_height) + }) + .expect("Timed out waiting for Miner 1 to mine the first block of Tenure 1"); + miners.submit_commit_miner_2(&sortdb); - miners.btc_regtest_controller_mut().build_next_block(2); + for _ in 0..2 { + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); + } - wait_for(60, || { - let info = get_chain_info(&conf_1); - Ok(info.burn_block_height >= 2 + info_before.burn_block_height - && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before - && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) - }) - .unwrap(); + let last_active_sortition = get_sortition_info(&conf_1); + assert!(last_active_sortition.was_sortition); - let cur_empty_sortition = get_sortition_info(&conf_1); - assert!(!cur_empty_sortition.was_sortition); - let inactive_sortition = get_sortition_info_ch( - &conf_1, - cur_empty_sortition.last_sortition_ch.as_ref().unwrap(), - ); - assert!(inactive_sortition.was_sortition); - assert_eq!( - inactive_sortition.burn_block_height, - last_active_sortition.burn_block_height + 1 - ); + let tenure_1_info = get_chain_info(&conf_1); + info!("Mining flash block!"); + miners.btc_regtest_controller_mut().build_next_block(2); - info!("==================== Mined a flash block ===================="); - info!("Flash block sortition info"; - "last_active_winner" => ?last_active_sortition.miner_pk_hash160, - "last_winner" => ?inactive_sortition.miner_pk_hash160, - "last_active_ch" => %last_active_sortition.consensus_hash, - "last_winner_ch" => %inactive_sortition.consensus_hash, - "cur_empty_sortition" => %cur_empty_sortition.consensus_hash, - ); + wait_for(60, || { + let info = get_chain_info(&conf_1); + Ok(info.burn_block_height >= 2 + tenure_1_info.burn_block_height) + }) + .expect("Timed out waiting 
for the flash blocks to be processed by the stacks nodes"); - if last_active_sortition.miner_pk_hash160 != inactive_sortition.miner_pk_hash160 { - info!( - "==================== Mined a flash block with changed miners ====================" - ); - break get_account(&conf_1.node.data_url, &sender_addr).nonce; - } - }; + let cur_empty_sortition = get_sortition_info(&conf_1); + assert!(!cur_empty_sortition.was_sortition); + let inactive_sortition = get_sortition_info_ch( + &conf_1, + cur_empty_sortition.last_sortition_ch.as_ref().unwrap(), + ); + assert!(inactive_sortition.was_sortition); + assert_eq!( + inactive_sortition.burn_block_height, + last_active_sortition.burn_block_height + 1 + ); + assert_eq!( + inactive_sortition.miner_pk_hash160, + Some(mining_pkh_2), + "Miner 2 should have won the inactive sortition" + ); // after the flash block, make sure we get block processing without a new bitcoin block // being mined. - for _ in 0..2 { - let sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - let contract_call_tx = make_contract_call( - &miners.sender_sk, - sender_nonce, - miners.send_fee, - conf_1.burnchain.chain_id, - &sender_addr, - "burn-height-local", - "run-update", - &[], - ); - submit_tx(&conf_1.node.data_url, &contract_call_tx); - - wait_for(60, || { - let next_sender_nonce = get_account(&conf_1.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); + miners + .signer_test + .submit_burn_block_call_and_wait(&miners.sender_sk) + .expect("Timed out waiting for contract-call"); } - assert_eq!( - get_account(&conf_1.node.data_url, &sender_addr).nonce, - last_sender_nonce + 2, - "The last two transactions after the flash block must be included in a block" - ); + miners + .signer_test + .check_signer_states_normal_missed_sortition(); + miners.shutdown(); } @@ -9975,21 +9937,9 @@ fn single_miner_empty_sortition() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let burn_height_contract = " - (define-data-var local-burn-block-ht uint u0) - (define-public (run-update) - (ok (var-set local-burn-block-ht burn-block-height))) - "; - - let contract_tx = make_contract_publish( - &sender_sk, - 0, - 1000, - conf.burnchain.chain_id, - "burn-height-local", - burn_height_contract, - ); - submit_tx(&conf.node.data_url, &contract_tx); + signer_test + .submit_burn_block_contract_and_wait(&sender_sk) + .expect("Timed out waiting for contract publish"); let rl1_commits = signer_test .running_nodes @@ -10011,29 +9961,9 @@ fn single_miner_empty_sortition() { // mine the interim blocks for _ in 0..2 { - let sender_nonce = get_account(&conf.node.data_url, &sender_addr).nonce; - // check if the burn contract is already produced, if not wait for it to be included in - // an interim block - if sender_nonce >= 1 { - let contract_call_tx = make_contract_call( - &sender_sk, - sender_nonce, - send_fee, - conf.burnchain.chain_id, - &sender_addr, - "burn-height-local", - "run-update", - &[], - ); - submit_tx(&conf.node.data_url, &contract_call_tx); - } - - // make sure the sender's tx gets included (whether it was the contract publish or call) - wait_for(60, || { - let next_sender_nonce = get_account(&conf.node.data_url, &sender_addr).nonce; - Ok(next_sender_nonce > sender_nonce) - }) - .unwrap(); + signer_test + .submit_burn_block_call_and_wait(&sender_sk) + .expect("Timed out waiting for contract-call"); } let last_active_sortition = get_sortition_info(&conf); @@ -10671,7 +10601,7 @@ fn interrupt_miner_on_new_stacks_tip() 
{ TEST_SKIP_BLOCK_BROADCAST.set(true); // submit a tx so that the miner will mine a stacks block - let tx = miners.send_transfer_tx(); + let (tx, _) = miners.send_transfer_tx(); // Wait for the block with this transfer to be accepted wait_for(30, || { Ok(test_observer::get_mined_nakamoto_blocks() From cac3a32d21789bb690c037acaf20dcaa0b67d597 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Thu, 20 Mar 2025 16:52:35 -0500 Subject: [PATCH 171/238] fix comments, typos --- libsigner/src/v0/messages.rs | 4 ++-- stacks-signer/src/chainstate.rs | 2 +- stacks-signer/src/signerdb.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index b09fab75dac..ab5462433ed 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -604,7 +604,7 @@ impl StacksMessageCodec for StateMachineUpdateMinerState { parent_tenure_last_block.consensus_serialize(fd)?; parent_tenure_last_block_height.consensus_serialize(fd)?; } - StateMachineUpdateMinerState::NoValidMiner => return Ok(()), + StateMachineUpdateMinerState::NoValidMiner => {} } Ok(()) } @@ -628,7 +628,7 @@ impl StacksMessageCodec for StateMachineUpdateMinerState { }) } other => Err(CodecError::DeserializeError(format!( - "Unexpect miner state variant in StateMachineUpdate: {other}" + "Unexpected miner state variant in StateMachineUpdate: {other}" ))), } } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4bbb3bfa4a2..7c26b3a673d 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -42,7 +42,7 @@ pub enum SignerChainstateError { /// The signer could not find information about the parent tenure #[error("No information available for parent tenure '{0}'")] NoParentTenureInfo(ConsensusHash), - /// The signer could not find information about the parent tenure + /// The local state machine wasn't ready to be queried #[error("The local state machine is not ready, so no update message can be produced")] LocalStateMachineNotReady, } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 40919bcf767..1fbdb63fc0c 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -962,7 +962,7 @@ impl SignerDb { Ok(()) } - /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcheer by this signer + /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer /// if that burn block has been received. pub fn get_burn_block_receive_time( &self, @@ -979,7 +979,7 @@ impl SignerDb { Ok(Some(receive_time)) } - /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcheer by this signer + /// Get timestamp (epoch seconds) at which a burn block was received over the event dispatcher by this signer /// if that burn block has been received. 
pub fn get_burn_block_receive_time_ch(
        &self,

From e2a2d7a2024173f1661985b1af214278c41c9478 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Fri, 21 Mar 2025 14:19:58 +0100
Subject: [PATCH 172/238] use max_execution_time_secs instead of
 max_execution_time
---
 stackslib/src/config/mod.rs | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)

diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs
index 2865fdacce6..dbb2cc1efd0 100644
--- a/stackslib/src/config/mod.rs
+++ b/stackslib/src/config/mod.rs
@@ -1104,13 +1104,9 @@ impl Config {
             },
             miner_status,
             confirm_microblocks: false,
-            max_execution_time: if let Some(max_execution_time_secs) =
-                miner_config.max_execution_time
-            {
-                Some(Duration::from_secs(max_execution_time_secs))
-            } else {
-                None
-            },
+            max_execution_time: miner_config
+                .max_execution_time_secs
+                .map(Duration::from_secs),
         }
     }
@@ -1153,13 +1149,9 @@ impl Config {
             },
             miner_status,
             confirm_microblocks: true,
-            max_execution_time: if let Some(max_execution_time_secs) =
-                miner_config.max_execution_time
-            {
-                Some(Duration::from_secs(max_execution_time_secs))
-            } else {
-                None
-            },
+            max_execution_time: miner_config
+                .max_execution_time_secs
+                .map(Duration::from_secs),
         }
     }
@@ -2192,7 +2184,7 @@ pub struct MinerConfig {
     /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections
     pub block_rejection_timeout_steps: HashMap<u32, Duration>,
     /// Define max execution time for contract calls: transactions taking more than the specified amount of seconds will be rejected
-    pub max_execution_time: Option<u64>,
+    pub max_execution_time_secs: Option<u64>,
 }

 impl Default for MinerConfig {
@@ -2242,7 +2234,7 @@ impl Default for MinerConfig {
                 rejections_timeouts_default_map.insert(30, Duration::from_secs(0));
                 rejections_timeouts_default_map
             },
-            max_execution_time: None,
+            max_execution_time_secs: None,
         }
     }
 }
@@ -2642,7 +2634,7 @@ pub struct MinerConfigFile {
     pub tenure_timeout_secs: Option<u64>,
     pub tenure_extend_cost_threshold: Option<u64>,
     pub block_rejection_timeout_steps: Option<HashMap<String, u64>>,
-    pub max_execution_time: Option<u64>,
+    pub max_execution_time_secs: Option<u64>,
 }

 impl MinerConfigFile {
@@ -2806,7 +2798,7 @@ impl MinerConfigFile {
                 }
             },

-            max_execution_time: self.max_execution_time
+            max_execution_time_secs: self.max_execution_time_secs
        })
    }
}

From 0ddfbddb6c6a5306e7572d73383c7ebf3f4cd207 Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Fri, 21 Mar 2025 14:20:35 +0100
Subject: [PATCH 173/238] updated CHANGELOG
---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d3ce3496feb..3952b5b8562 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,7 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE
 ### Added

 - Add fee information to transaction log ending with "success" or "skipped", while building a new block
-- Add `max_execution_time` to miner config for limiting duration of contract calls
+- Add `max_execution_time_secs` to miner config for limiting duration of contract calls

 ### Changed

From dec99dc21656ce38c5463f46618c69b7888e7042 Mon Sep 17 00:00:00 2001
From: Aaron Blankstein
Date: Fri, 21 Mar 2025 09:03:54 -0500
Subject: [PATCH 174/238] refactor: use log macros without importing from slog
---
 pox-locking/src/events.rs    | 3 ---
 pox-locking/src/events_24.rs | 3 ---
 pox-locking/src/lib.rs       | 1 -
 pox-locking/src/pox_1.rs     | 1 -
 pox-locking/src/pox_2.rs     | 1 -
 pox-locking/src/pox_3.rs     | 1 -
 pox-locking/src/pox_4.rs     | 1 -
stacks-common/src/util/log.rs | 12 ++++++------ stacks-signer/src/chainstate.rs | 1 - stacks-signer/src/client/mod.rs | 1 - stacks-signer/src/client/stackerdb.rs | 1 - stacks-signer/src/client/stacks_client.rs | 1 - stacks-signer/src/lib.rs | 1 - stacks-signer/src/main.rs | 1 - stacks-signer/src/monitor_signers.rs | 1 - stacks-signer/src/monitoring/mod.rs | 2 -- stacks-signer/src/monitoring/server.rs | 1 - stacks-signer/src/runloop.rs | 1 - stacks-signer/src/signerdb.rs | 1 - stacks-signer/src/tests/chainstate.rs | 1 - stacks-signer/src/v0/signer.rs | 1 - stacks-signer/src/v0/tests.rs | 1 - stackslib/src/main.rs | 1 - 23 files changed, 6 insertions(+), 33 deletions(-) diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index e298de65f29..010a9dcf162 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -21,9 +21,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, ResponseData, TupleData}; use clarity::vm::Value; #[cfg(any(test, feature = "testing"))] -use slog::slog_debug; -use slog::slog_error; -#[cfg(any(test, feature = "testing"))] use stacks_common::debug; use stacks_common::types::StacksEpochId; use stacks_common::{error, test_debug}; diff --git a/pox-locking/src/events_24.rs b/pox-locking/src/events_24.rs index 3f54794bb75..b827988e948 100644 --- a/pox-locking/src/events_24.rs +++ b/pox-locking/src/events_24.rs @@ -20,9 +20,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier, TupleData}; use clarity::vm::Value; #[cfg(any(test, feature = "testing"))] -use slog::slog_debug; -use slog::slog_error; -#[cfg(any(test, feature = "testing"))] use stacks_common::debug; use stacks_common::{error, test_debug}; diff --git a/pox-locking/src/lib.rs b/pox-locking/src/lib.rs index 63380212dcb..fe5ea6b8991 100644 --- a/pox-locking/src/lib.rs +++ b/pox-locking/src/lib.rs @@ -30,7 +30,6 @@ use clarity::vm::contexts::GlobalContext; use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::Value; -use slog::slog_warn; use stacks_common::types::StacksEpochId; use stacks_common::warn; diff --git a/pox-locking/src/pox_1.rs b/pox-locking/src/pox_1.rs index e28ccb917e2..4cc7ffe0ea5 100644 --- a/pox-locking/src/pox_1.rs +++ b/pox-locking/src/pox_1.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::PrincipalData; use clarity::vm::Value; -use slog::slog_debug; use stacks_common::debug; use crate::LockingError; diff --git a/pox-locking/src/pox_2.rs b/pox-locking/src/pox_2.rs index 47f30faa4db..996a0c52d5a 100644 --- a/pox-locking/src/pox_2.rs +++ b/pox-locking/src/pox_2.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/pox-locking/src/pox_3.rs b/pox-locking/src/pox_3.rs index 8c2616b3738..2aab7caf2b3 100644 --- a/pox-locking/src/pox_3.rs +++ b/pox-locking/src/pox_3.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, 
RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/pox-locking/src/pox_4.rs b/pox-locking/src/pox_4.rs index 8eda9a2e897..2853c892124 100644 --- a/pox-locking/src/pox_4.rs +++ b/pox-locking/src/pox_4.rs @@ -23,7 +23,6 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType}; use clarity::vm::events::{STXEventType, STXLockEventData, StacksTransactionEvent}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{Environment, Value}; -use slog::{slog_debug, slog_error}; use stacks_common::{debug, error}; use crate::events::synthesize_pox_event_info; diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 77a4950f818..0fba87a16c8 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -268,7 +268,7 @@ macro_rules! trace { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Trace.is_at_least(cur_level) { - slog_trace!($crate::util::log::LOGGER, $($arg)*) + slog::slog_trace!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -278,7 +278,7 @@ macro_rules! error { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Error.is_at_least(cur_level) { - slog_error!($crate::util::log::LOGGER, $($arg)*) + slog::slog_error!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -288,7 +288,7 @@ macro_rules! warn { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Warning.is_at_least(cur_level) { - slog_warn!($crate::util::log::LOGGER, $($arg)*) + slog::slog_warn!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -298,7 +298,7 @@ macro_rules! info { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Info.is_at_least(cur_level) { - slog_info!($crate::util::log::LOGGER, $($arg)*) + slog::slog_info!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -308,7 +308,7 @@ macro_rules! debug { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Debug.is_at_least(cur_level) { - slog_debug!($crate::util::log::LOGGER, $($arg)*) + slog::slog_debug!($crate::util::log::LOGGER, $($arg)*) } }) } @@ -318,7 +318,7 @@ macro_rules! 
fatal { ($($arg:tt)*) => ({ let cur_level = $crate::util::log::get_loglevel(); if slog::Level::Critical.is_at_least(cur_level) { - slog_crit!($crate::util::log::LOGGER, $($arg)*) + slog::slog_crit!($crate::util::log::LOGGER, $($arg)*) } }) } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 8654450738a..a602e6f048d 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -20,7 +20,6 @@ use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; use libsigner::v0::messages::RejectReason; -use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash, StacksPublicKey}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index ab8ad610562..59b8309d536 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -25,7 +25,6 @@ use clarity::vm::errors::Error as ClarityError; use clarity::vm::types::serialization::SerializationError; use libsigner::RPCError; use libstackerdb::Error as StackerDBError; -use slog::slog_debug; pub use stackerdb::*; pub use stacks_client::*; use stacks_common::codec::Error as CodecError; diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 222eda72df8..a08ff727b4a 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -19,7 +19,6 @@ use clarity::codec::read_next; use hashbrown::HashMap; use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; -use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::util::hash::to_hex; use stacks_common::{debug, info, warn}; diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index db0b356fb40..9f3eeb77b2c 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -43,7 +43,6 @@ use libsigner::v0::messages::PeerInfo; use reqwest::header::AUTHORIZATION; use serde::Deserialize; use serde_json::json; -use slog::{slog_debug, slog_warn}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::CHAIN_ID_MAINNET; use stacks_common::types::chainstate::{ diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9f2df125341..925afb834ed 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -52,7 +52,6 @@ use chainstate::SortitionsView; use config::GlobalConfig; use libsigner::{SignerEvent, SignerEventReceiver, SignerEventTrait, VERSION_STRING}; use runloop::SignerResult; -use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; use crate::client::StacksClient; diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 821f2e1c6ec..2e1bf771e6f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -34,7 +34,6 @@ use clarity::types::chainstate::StacksPublicKey; use clarity::util::sleep_ms; use libsigner::{SignerSession, VERSION_STRING}; use libstackerdb::StackerDBChunkData; -use slog::{slog_debug, slog_error}; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, error}; diff --git 
a/stacks-signer/src/monitor_signers.rs b/stacks-signer/src/monitor_signers.rs index 65b4fdda3e4..bbd59f00ea0 100644 --- a/stacks-signer/src/monitor_signers.rs +++ b/stacks-signer/src/monitor_signers.rs @@ -21,7 +21,6 @@ use clarity::types::StacksEpochId; use clarity::util::sleep_ms; use libsigner::v0::messages::{MessageSlotID, SignerMessage}; use libsigner::SignerSession; -use slog::{slog_info, slog_warn}; use stacks_common::{info, warn}; use crate::cli::MonitorSignersArgs; diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 60a530acabd..d4974aec7f5 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -25,7 +25,6 @@ mod server; pub mod actions { use ::prometheus::HistogramTimer; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; - use slog::slog_error; use stacks_common::error; use crate::config::GlobalConfig; @@ -121,7 +120,6 @@ pub mod actions { #[cfg(not(feature = "monitoring_prom"))] pub mod actions { use blockstack_lib::chainstate::nakamoto::NakamotoBlock; - use slog::slog_info; use stacks_common::info; use crate::GlobalConfig; diff --git a/stacks-signer/src/monitoring/server.rs b/stacks-signer/src/monitoring/server.rs index 0e584eec58f..2b20b9131c0 100644 --- a/stacks-signer/src/monitoring/server.rs +++ b/stacks-signer/src/monitoring/server.rs @@ -20,7 +20,6 @@ use std::time::Instant; use clarity::util::hash::to_hex; use clarity::util::secp256k1::Secp256k1PublicKey; use libsigner::VERSION_STRING; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use tiny_http::{Response as HttpResponse, Server as HttpServer}; diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index f8bb4acac99..d9c75d664b7 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -20,7 +20,6 @@ use std::time::Duration; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; use crate::chainstate::SortitionsView; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5d166a3ecfb..b828cc7acbd 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -35,7 +35,6 @@ use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, }; use serde::{Deserialize, Serialize}; -use slog::{slog_debug, slog_error}; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::get_epoch_time_secs; diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 9ffabeed6c5..4b57c76d0ca 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -31,7 +31,6 @@ use clarity::types::chainstate::{BurnchainHeaderHash, SortitionId}; use clarity::util::vrf::VRFProof; use libsigner::v0::messages::RejectReason; use libsigner::{BlockProposal, BlockProposalData}; -use slog::slog_info; use stacks_common::bitvec::BitVec; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::info; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2decd05dc16..ad9b6107879 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -40,7 +40,6 @@ use libsigner::v0::messages::{ 
RejectReason, RejectReasonPrefix, SignerMessage, }; use libsigner::{BlockProposal, SignerEvent}; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::secp256k1::MessageSignature; diff --git a/stacks-signer/src/v0/tests.rs b/stacks-signer/src/v0/tests.rs index 9ea494f808b..6fb7ffa9fe5 100644 --- a/stacks-signer/src/v0/tests.rs +++ b/stacks-signer/src/v0/tests.rs @@ -18,7 +18,6 @@ use std::sync::LazyLock; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use libsigner::v0::messages::{BlockResponse, RejectReason}; use libsigner::BlockProposal; -use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::tests::TestFlag; diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 46838f217df..d5866507c8f 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -22,7 +22,6 @@ #[macro_use] extern crate stacks_common; -#[macro_use(slog_debug, slog_info, slog_warn)] extern crate slog; #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] From 94b55c559e4b66f297ada906fdf46bec58382990 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Mar 2025 10:27:31 -0400 Subject: [PATCH 175/238] feat: add `considered_txs` table This is used in combination with the nonce table to limit the transactions that are selected in each execution of the query. Without this, a large number of unexecutable or ignored transactions would prevent forward progress. --- stackslib/src/chainstate/stacks/miner.rs | 4 +- stackslib/src/core/mempool.rs | 73 ++++++++++++++++++- stackslib/src/core/tests/mod.rs | 12 +-- .../stacks-node/src/nakamoto_node/miner.rs | 2 +- 4 files changed, 78 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 2e1b5030ef3..8fa6762cc08 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1246,7 +1246,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let deadline = get_epoch_time_ms() + u128::from(self.settings.max_miner_time_ms); let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; - mem_pool.reset_nonce_cache()?; + mem_pool.reset_mempool_caches()?; let stacks_epoch_id = clarity_tx.get_epoch(); let block_limit = clarity_tx .block_limit() @@ -2620,7 +2620,7 @@ impl StacksBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - mempool.reset_nonce_cache()?; + mempool.reset_mempool_caches()?; let (blocked, tx_events) = match Self::select_and_apply_transactions( &mut epoch_tx, &mut builder, diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index ef52342854d..06bdcd9eb8d 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -22,11 +22,12 @@ use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::{Duration, Instant, SystemTime}; -use std::{fs, io}; +use std::{fs, io, thread}; use clarity::vm::types::PrincipalData; use rand::distributions::Uniform; use rand::prelude::Distribution; +use rand::Rng; use rusqlite::types::ToSql; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Row, Rows, Statement, @@ -101,6 +102,9 @@ pub const DEFAULT_BLACKLIST_MAX_SIZE: u64 = 134217728; // 2**27 -- the blacklist // loading the bloom filter, 
even though the bloom filter is larger.
 const DEFAULT_MAX_TX_TAGS: u32 = 2048;

+// maximum number of transactions that can fit in a single block
+const MAX_BLOCK_TXS: usize = 11_650;
+
 /// A node-specific transaction tag -- the first 8 bytes of siphash(local-seed,txid)
 #[derive(Debug, Clone, PartialEq, Hash, Eq)]
 pub struct TxTag(pub [u8; 8]);
@@ -837,6 +841,13 @@ const MEMPOOL_SCHEMA_7_TIME_ESTIMATES: &[&str] = &[
 ];

 const MEMPOOL_SCHEMA_8_NONCE_SORTING: &'static [&'static str] = &[
+    r#"
+    -- Add table to track considered transactions
+    CREATE TABLE IF NOT EXISTS considered_txs(
+        txid TEXT PRIMARY KEY NOT NULL,
+        FOREIGN KEY(txid) REFERENCES mempool(txid) ON DELETE CASCADE
+    );
+    "#,
     r#"
     -- Drop redundant mempool indexes, covered by unique constraints
     DROP INDEX IF EXISTS "by_txid";
@@ -1429,10 +1440,12 @@ impl MemPoolDB {
     }

     #[cfg_attr(test, mutants::skip)]
-    pub fn reset_nonce_cache(&mut self) -> Result<(), db_error> {
+    pub fn reset_mempool_caches(&mut self) -> Result<(), db_error> {
         debug!("reset nonce cache");
-        let sql = "DELETE FROM nonces";
-        self.db.execute(sql, NO_PARAMS)?;
+        // Delete all rows from the nonces table
+        self.db.execute("DELETE FROM nonces", NO_PARAMS)?;
+        // Also delete all rows from the considered_txs table
+        self.db.execute("DELETE FROM considered_txs", NO_PARAMS)?;
         Ok(())
     }
@@ -1569,6 +1582,7 @@ impl MemPoolDB {
     {
         let start_time = Instant::now();
         let mut total_considered = 0;
+        let mut considered_txs = Vec::with_capacity(MAX_BLOCK_TXS);

         debug!("Mempool walk for {}ms", settings.max_walk_time_ms,);
@@ -1635,6 +1649,7 @@ impl MemPoolDB {
                 LEFT JOIN nonces AS ns ON m.sponsor_address = ns.address
                 WHERE (no.address IS NULL OR m.origin_nonce = no.nonce)
                     AND (ns.address IS NULL OR m.sponsor_nonce = ns.nonce)
+                    AND m.txid NOT IN (SELECT txid FROM considered_txs)
                 ORDER BY accept_time ASC
                 LIMIT 11650 -- max transactions that can fit in one block
             ),
@@ -1773,6 +1788,7 @@ impl MemPoolDB {
                     // Candidate transaction: fall through
                 }
             };
+            considered_txs.push(candidate.txid);

             // Read in and deserialize the transaction.
             let tx_info_option = MemPoolDB::get_tx(self.conn(), &candidate.txid)?;
@@ -1900,6 +1916,10 @@ impl MemPoolDB {
             // Flush the nonce cache to the database before performing the next
             // query.
             nonce_cache.flush(&mut nonce_conn);
+
+            // Flush the candidate cache to the database before performing the
+            // next query.
+            flush_considered_txs(&mut nonce_conn, &mut considered_txs);
         };

         // drop these rusqlite statements and queries, since their existence as immutable borrows on the
@@ -2881,3 +2901,48 @@ impl MemPoolDB {
         Ok((ret, next_page, num_rows_visited))
     }
 }
+
+/// Flush the considered transaction IDs to the DB.
+/// Do not return until successful. After a successful flush, clear the vector.
+pub fn flush_considered_txs(conn: &mut DBConn, considered_txs: &mut Vec<Txid>) {
+    const MAX_BACKOFF: Duration = Duration::from_secs(30);
+    let mut backoff = Duration::from_millis(rand::thread_rng().gen_range(50..200));
+
+    loop {
+        // Pass a slice to the try function.
+        let result = try_flush_considered_txs(conn, considered_txs.as_slice());
+
+        match result {
+            Ok(_) => {
+                // On success, clear the vector so that it's empty.
+                considered_txs.clear();
+                return;
+            }
+            Err(e) => {
+                warn!("Considered txid flush failed: {e}. Retrying in {backoff:?}");
+                thread::sleep(backoff);
+                if backoff < MAX_BACKOFF {
+                    backoff =
+                        backoff * 2 + Duration::from_millis(rand::thread_rng().gen_range(50..200));
+                }
+            }
+        }
+    }
+}
+
+/// Try to flush the considered transaction IDs to the DB.
+pub fn try_flush_considered_txs( + conn: &mut DBConn, + considered_txs: &[Txid], +) -> Result<(), db_error> { + let sql = "INSERT OR IGNORE INTO considered_txs (txid) VALUES (?1)"; + + let db_tx = conn.transaction()?; + + for txid in considered_txs { + db_tx.execute(sql, params![txid])?; + } + + db_tx.commit()?; + Ok(()) +} diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index f964d552cbb..120acb478fd 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -334,7 +334,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); chainstate.with_read_only_clarity_tx( @@ -373,7 +373,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); // The mempool iterator no longer does any consideration of what block accepted @@ -414,7 +414,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); chainstate.with_read_only_clarity_tx( @@ -453,7 +453,7 @@ fn mempool_walk_over_fork() { ); mempool - .reset_nonce_cache() + .reset_mempool_caches() .expect("Should be able to reset nonces"); // let's test replace-across-fork while we're here. @@ -675,7 +675,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Next with 0% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 0; chainstate.with_read_only_clarity_tx( @@ -711,7 +711,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { ); // Then with with 100% - let _ = mempool.reset_nonce_cache(); + let _ = mempool.reset_mempool_caches(); mempool_settings.consider_no_estimate_tx_prob = 100; chainstate.with_read_only_clarity_tx( diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d0cb13521c7..eeb7650f31d 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -514,7 +514,7 @@ impl BlockMinerThread { .config .connect_mempool_db() .expect("Database failure opening mempool"); - mem_pool.reset_nonce_cache()?; + mem_pool.reset_mempool_caches()?; } // If we're mock mining, we may not have processed the block that the From 4921f5b308558d761a3d5af3d512a3d64a745d78 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Mar 2025 11:06:48 -0500 Subject: [PATCH 176/238] versioning support for state machine update messages * add content-length * add versioning enum with compatibility check --- libsigner/src/v0/messages.rs | 206 +++++++++++++++++++++------ stacks-signer/src/v0/signer_state.rs | 26 ++-- 2 files changed, 180 insertions(+), 52 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index ab5462433ed..a27da8058d5 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -25,6 +25,7 @@ use std::fmt::{Debug, Display}; use std::io::{Read, Write}; +use std::marker::PhantomData; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; @@ -77,6 +78,9 @@ use crate::{ /// Maximum size of the [BlockResponseData] serialized bytes pub const BLOCK_RESPONSE_DATA_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB +/// Maximum size of the state machine update messages +pub const STATE_MACHINE_UPDATE_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB + 
define_u8_enum!(
/// Enum representing the stackerdb message identifier: this is
/// the contract index in the signers contracts (i.e., X in signers-0-X)
@@ -541,19 +545,31 @@ impl StacksMessageCodec for MockBlock {
     }
 }

-/// Message for update the Signer State infos
+/// Message for updates to the Signer State machine
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
 pub struct StateMachineUpdate {
-    /// The tip burn block (i.e., the latest bitcoin block) seen by this signer
-    pub burn_block: ConsensusHash,
-    /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer
-    pub burn_block_height: u64,
-    /// The signer's view of who the current miner should be (and their tenure building info)
-    pub current_miner: StateMachineUpdateMinerState,
     /// The active signing protocol version
     pub active_signer_protocol_version: u64,
     /// The highest supported signing protocol by the local signer
     pub local_supported_signer_protocol_version: u64,
+    /// The actual content of the state machine update message (this is a versioned enum)
+    pub content: StateMachineUpdateContent,
+    // Prevent manual construction of this struct
+    no_manual_construct: PhantomData<()>,
+}
+
+/// Versioning enum for StateMachineUpdate messages
+#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
+pub enum StateMachineUpdateContent {
+    /// Version 0
+    V0 {
+        /// The tip burn block (i.e., the latest bitcoin block) seen by this signer
+        burn_block: ConsensusHash,
+        /// The tip burn block height (i.e., the latest bitcoin block) seen by this signer
+        burn_block_height: u64,
+        /// The signer's view of who the current miner should be (and their tenure building info)
+        current_miner: StateMachineUpdateMinerState,
+    },
 }

 /// Message for update the Signer State infos
@@ -578,6 +594,26 @@ pub enum StateMachineUpdateMinerState {
     NoValidMiner,
 }

+impl StateMachineUpdate {
+    /// Construct a StateMachineUpdate message, checking to ensure that the
+    /// supplied content is supported by the supplied protocol versions.
+    pub fn new(
+        active_signer_protocol_version: u64,
+        local_supported_signer_protocol_version: u64,
+        content: StateMachineUpdateContent,
+    ) -> Result<Self, CodecError> {
+        if !content.is_protocol_version_compatible(active_signer_protocol_version) {
+            return Err(CodecError::DeserializeError(format!("StateMachineUpdateContent is incompatible with protocol version: {active_signer_protocol_version}")));
+        }
+        Ok(Self {
+            active_signer_protocol_version,
+            local_supported_signer_protocol_version,
+            content,
+            no_manual_construct: PhantomData,
+        })
+    }
+}
+
 impl StateMachineUpdateMinerState {
     fn get_variant_id(&self) -> u8 {
         match self {
@@ -634,32 +670,89 @@ impl StacksMessageCodec for StateMachineUpdateMinerState {
     }
 }

+impl StateMachineUpdateContent {
+    // Is the protocol version specified one that uses self's content?
+    fn is_protocol_version_compatible(&self, version: u64) -> bool {
+        match self {
+            Self::V0 { .. } => version == 0,
+        }
+    }
+
+    fn serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
+        match self {
+            Self::V0 {
+                burn_block,
+                burn_block_height,
+                current_miner,
+            } => {
+                burn_block.consensus_serialize(fd)?;
+                burn_block_height.consensus_serialize(fd)?;
+                current_miner.consensus_serialize(fd)?;
+            }
+        }
+        Ok(())
+    }
+    fn deserialize<R: Read>(fd: &mut R, version: u64) -> Result<Self, CodecError> {
+        match version {
+            0 => {
+                let burn_block = read_next(fd)?;
+                let burn_block_height = read_next(fd)?;
+                let current_miner = read_next(fd)?;
+                Ok(Self::V0 {
+                    burn_block,
+                    burn_block_height,
+                    current_miner,
+                })
+            }
+            other => Err(CodecError::DeserializeError(format!(
+                "Unknown state machine update version: {other}"
+            ))),
+        }
+    }
+}
+
 impl StacksMessageCodec for StateMachineUpdate {
     fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
         self.active_signer_protocol_version
             .consensus_serialize(fd)?;
         self.local_supported_signer_protocol_version
             .consensus_serialize(fd)?;
-        self.burn_block.consensus_serialize(fd)?;
-        self.burn_block_height.consensus_serialize(fd)?;
-        self.current_miner.consensus_serialize(fd)?;
-        Ok(())
+        let mut buffer = Vec::new();
+        self.content.serialize(&mut buffer)?;
+        let buff_len = u32::try_from(buffer.len())
+            .map_err(|_e| CodecError::SerializeError("Message length exceeded u32".into()))?;
+        if buff_len > STATE_MACHINE_UPDATE_MAX_SIZE {
+            return Err(CodecError::SerializeError(format!(
+                "Message length exceeded max: {STATE_MACHINE_UPDATE_MAX_SIZE}"
+            )));
+        }
+        buff_len.consensus_serialize(fd)?;
+        fd.write_all(&buffer).map_err(CodecError::WriteError)
     }

     fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
         let active_signer_protocol_version = read_next(fd)?;
         let local_supported_signer_protocol_version = read_next(fd)?;
-        let burn_block = read_next(fd)?;
-        let burn_block_height = read_next(fd)?;
-        let current_miner = read_next(fd)?;
+        let content_len: u32 = read_next(fd)?;
+        if content_len > STATE_MACHINE_UPDATE_MAX_SIZE {
+            return Err(CodecError::DeserializeError(format!(
+                "Message length exceeded max: {STATE_MACHINE_UPDATE_MAX_SIZE}"
+            )));
+        }
+        let buffer_len = usize::try_from(content_len)
+            .expect("FATAL: cannot process signer messages when usize < u32");
+        let mut buffer = vec![0u8; buffer_len];
+        fd.read_exact(&mut buffer).map_err(CodecError::ReadError)?;
+        let content = StateMachineUpdateContent::deserialize(
+            &mut buffer.as_slice(),
+            active_signer_protocol_version,
+        )?;

-        Ok(Self {
-            burn_block,
-            burn_block_height,
-            current_miner,
+        Self::new(
             active_signer_protocol_version,
             local_supported_signer_protocol_version,
-        })
+            content,
+        )
     }
 }

@@ -2121,28 +2214,53 @@ mod test {
     }

     #[test]
-    fn test_deserialize_state_machine_update() {
-        let signer_message = StateMachineUpdate {
-            burn_block: ConsensusHash([0x55; 20]),
-            burn_block_height: 100,
-            active_signer_protocol_version: 2,
-            local_supported_signer_protocol_version: 3,
-            current_miner: StateMachineUpdateMinerState::ActiveMiner {
-                current_miner_pkh: Hash160([0xab; 20]),
-                tenure_id: ConsensusHash([0x44; 20]),
-                parent_tenure_id: ConsensusHash([0x22; 20]),
-                parent_tenure_last_block: StacksBlockId([0x33; 32]),
-                parent_tenure_last_block_height: 1,
+    fn version_check_state_machine_update() {
+        let error = StateMachineUpdate::new(
+            1,
+            3,
+            StateMachineUpdateContent::V0 {
+                burn_block: ConsensusHash([0x55; 20]),
+                burn_block_height: 100,
+                current_miner: StateMachineUpdateMinerState::ActiveMiner {
+                    current_miner_pkh: Hash160([0xab; 20]),
+                    tenure_id: ConsensusHash([0x44; 20]),
+                    parent_tenure_id:
ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, }, - }; + ) + .unwrap_err(); + assert!(matches!(error, CodecError::DeserializeError(_))); + } + + #[test] + fn deserialize_state_machine_update_v0() { + let signer_message = StateMachineUpdate::new( + 0, + 3, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::ActiveMiner { + current_miner_pkh: Hash160([0xab; 20]), + tenure_id: ConsensusHash([0x44; 20]), + parent_tenure_id: ConsensusHash([0x22; 20]), + parent_tenure_last_block: StacksBlockId([0x33; 32]), + parent_tenure_last_block_height: 1, + }, + }, + ) + .unwrap(); let mut bytes = vec![]; signer_message.consensus_serialize(&mut bytes).unwrap(); // check for raw content for avoiding regressions when structure changes let raw_signer_message: Vec<&[u8]> = vec![ - /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2], + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 0], /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3], + /* content_len*/ &[0, 0, 0, 129], /* burn_block*/ &[0x55; 20], /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], /* current_miner_variant */ &[0x01], @@ -2160,21 +2278,25 @@ mod test { assert_eq!(signer_message, signer_message_deserialized); - let signer_message = StateMachineUpdate { - burn_block: ConsensusHash([0x55; 20]), - burn_block_height: 100, - active_signer_protocol_version: 2, - local_supported_signer_protocol_version: 3, - current_miner: StateMachineUpdateMinerState::NoValidMiner, - }; + let signer_message = StateMachineUpdate::new( + 0, + 4, + StateMachineUpdateContent::V0 { + burn_block: ConsensusHash([0x55; 20]), + burn_block_height: 100, + current_miner: StateMachineUpdateMinerState::NoValidMiner, + }, + ) + .unwrap(); let mut bytes = vec![]; signer_message.consensus_serialize(&mut bytes).unwrap(); // check for raw content for avoiding regressions when structure changes let raw_signer_message: Vec<&[u8]> = vec![ - /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 2], - /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 3], + /* active_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 0], + /* local_supported_signer_protocol_version*/ &[0, 0, 0, 0, 0, 0, 0, 4], + /* content_len*/ &[0, 0, 0, 29], /* burn_block*/ &[0x55; 20], /* burn_block_height*/ &[0, 0, 0, 0, 0, 0, 0, 100], /* current_miner_variant */ &[0x00], diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs index 0559f716f9d..f97c6737cd7 100644 --- a/stacks-signer/src/v0/signer_state.rs +++ b/stacks-signer/src/v0/signer_state.rs @@ -18,11 +18,13 @@ use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use libsigner::v0::messages::{ - StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateMinerState, + StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateContent, + StateMachineUpdateMinerState, }; use serde::{Deserialize, Serialize}; use slog::{slog_info, slog_warn}; use stacks_common::bitvec::BitVec; +use stacks_common::codec::Error as CodecError; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; @@ -100,11 +102,13 @@ pub enum 
StateMachineUpdate { } impl TryInto<StateMachineUpdateMessage> for &LocalStateMachine { - type Error = SignerChainstateError; + type Error = CodecError; fn try_into(self) -> Result<StateMachineUpdateMessage, Self::Error> { let LocalStateMachine::Initialized(state_machine) = self else { - return Err(SignerChainstateError::LocalStateMachineNotReady); + return Err(CodecError::SerializeError( + "Local state machine is not ready to be serialized into an update message".into(), + )); }; let current_miner = match state_machine.current_miner { @@ -124,13 +128,15 @@ impl TryInto<StateMachineUpdateMessage> for &LocalStateMachine { MinerState::NoValidMiner => StateMachineUpdateMinerState::NoValidMiner, }; - Ok(StateMachineUpdateMessage { - burn_block: state_machine.burn_block, - burn_block_height: state_machine.burn_block_height, - current_miner, - active_signer_protocol_version: state_machine.active_signer_protocol_version, - local_supported_signer_protocol_version: SUPPORTED_SIGNER_PROTOCOL_VERSION, - }) + StateMachineUpdateMessage::new( + state_machine.active_signer_protocol_version, + SUPPORTED_SIGNER_PROTOCOL_VERSION, + StateMachineUpdateContent::V0 { + burn_block: state_machine.burn_block, + burn_block_height: state_machine.burn_block_height, + current_miner, + }, + ) } } From 48ca4e54e5ed2071702433beb9a12e56bac96f5c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 21 Mar 2025 12:07:09 -0400 Subject: [PATCH 177/238] test: reduce test flakiness by ignoring phantom transactions Multiple tests check for a specific number of transfers in a block, and if a phantom transaction happens to end up in that block, the count will be off and the test will fail. --- testnet/stacks-node/src/tests/signer/v0.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6fee616edd5..3ac0443edce 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12176,8 +12176,11 @@ fn transfers_in_block(block: &serde_json::Value) -> usize { let raw_tx = tx["raw_tx"].as_str().unwrap(); let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TokenTransfer(..) = &parsed.payload { - count += 1; + if let TransactionPayload::TokenTransfer(_, amount, _) = &parsed.payload { + // Don't count phantom transactions, which have a 0 amount. 
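That zero-amount marker is the entire fix; as a standalone sketch, the same predicate could be written as follows (the `TokenTransfer(_, amount, _)` payload shape is taken from the hunk itself; the helper name and the import path are illustrative assumptions, not code from this patch):

```rust
use stacks::chainstate::stacks::{StacksTransaction, TransactionPayload};

// Illustrative predicate: a token transfer counts as a real transfer only
// when it moves a non-zero amount; zero-amount transfers are the phantom,
// nonce-advancing transactions the commit message describes.
fn is_real_transfer(tx: &StacksTransaction) -> bool {
    matches!(&tx.payload, TransactionPayload::TokenTransfer(_, amount, _) if *amount > 0)
}
```

The guard added on the next line applies exactly this check inline.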
+ if *amount > 0 { + count += 1; + } } } count From a46d67dd8f46724d852f072efb622a44d13cc071 Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Fri, 21 Mar 2025 19:13:32 +0100 Subject: [PATCH 178/238] chore: upgrade curve25519-dalek to v4 --- Cargo.lock | 54 +++++++---------------------------- stacks-common/Cargo.toml | 2 +- stacks-common/src/util/vrf.rs | 30 +++++++++---------- stackslib/Cargo.toml | 4 --- 4 files changed, 27 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a51010ecdf3..7f9c8abf160 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,7 +29,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -479,7 +479,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -488,7 +488,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -572,7 +572,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -745,7 +745,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", + "generic-array", "typenum", ] @@ -755,7 +755,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" dependencies = [ - "generic-array 0.14.7", + "generic-array", "subtle", ] @@ -768,20 +768,6 @@ dependencies = [ "cipher", ] -[[package]] -name = "curve25519-dalek" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "serde", - "subtle", - "zeroize", -] - [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -794,6 +780,7 @@ dependencies = [ "digest 0.10.7", "fiat-crypto", "rustc_version 0.4.0", + "serde", "subtle", "zeroize", ] @@ -834,22 +821,13 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -906,7 +884,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.3", + "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", 
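The vrf.rs hunks later in this patch adapt to curve25519-dalek v4's reworked scalar API: `Scalar::from_bits` is gone, canonical decoding now returns a `subtle::CtOption`, and RFC 7748 clamping is an explicit call. A compact sketch of those v4 idioms, assuming the v4 API (the function and names here are illustrative, not from the patch):

```rust
use curve25519_dalek::scalar::{clamp_integer, Scalar};

// Sketch of the v4 idioms this upgrade switches to:
// - canonical decoding yields a CtOption, convertible into Option for `?`,
// - clamping a raw secret is now a separate, explicit step.
fn decode(canonical: [u8; 32], secret: [u8; 32]) -> Option<(Scalar, Scalar)> {
    let c: Option<Scalar> = Scalar::from_canonical_bytes(canonical).into();
    let x = Scalar::from_bytes_mod_order(clamp_integer(secret));
    Some((c?, x))
}
```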
@@ -1193,15 +1171,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -3043,7 +3012,7 @@ name = "stacks-common" version = "0.0.1" dependencies = [ "chrono", - "curve25519-dalek 2.0.0", + "curve25519-dalek", "ed25519-dalek", "hashbrown 0.15.2", "lazy_static", @@ -3152,7 +3121,6 @@ dependencies = [ "assert-json-diff 1.1.0", "chrono", "clarity", - "curve25519-dalek 2.0.0", "ed25519-dalek", "hashbrown 0.15.2", "integer-sqrt", @@ -3708,7 +3676,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.7", + "generic-array", "subtle", ] diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 4b965d753d2..82fc521b149 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -54,7 +54,7 @@ features = ["serde", "recovery"] workspace = true [dependencies.curve25519-dalek] -version = "=2.0.0" +version = "4.1.3" features = ["serde"] [dependencies.time] diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 5c7439daf94..c8f18801282 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -26,7 +26,7 @@ use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; -use curve25519_dalek::scalar::Scalar as ed25519_Scalar; +use curve25519_dalek::scalar::{clamp_integer, Scalar as ed25519_Scalar}; use rand; use sha2::{Digest, Sha512}; @@ -246,7 +246,7 @@ impl VRFProof { #[allow(clippy::needless_range_loop)] pub fn check_c(c: &ed25519_Scalar) -> bool { - let c_bytes = c.reduce().to_bytes(); + let c_bytes = c.to_bytes(); // upper 16 bytes of c must be 0's for c_byte in c_bytes[16..32].iter() { @@ -281,7 +281,9 @@ impl VRFProof { // 0 32 48 80 // |----------------------------|----------|---------------------------| // Gamma point c scalar s scalar - let gamma_opt = CompressedEdwardsY::from_slice(&bytes[0..32]).decompress(); + let gamma_opt = CompressedEdwardsY::from_slice(&bytes[0..32]) + .unwrap() + .decompress(); if gamma_opt.is_none() { test_debug!("Invalid Gamma"); return None; @@ -297,8 +299,8 @@ impl VRFProof { c_buf[..16].copy_from_slice(&bytes[32..(16 + 32)]); s_buf[..32].copy_from_slice(&bytes[48..(32 + 48)]); - let c = ed25519_Scalar::from_canonical_bytes(c_buf)?; - let s = ed25519_Scalar::from_canonical_bytes(s_buf)?; + let c = ed25519_Scalar::from_canonical_bytes(c_buf).expect("Invalid C scalar"); + let s = ed25519_Scalar::from_canonical_bytes(s_buf).expect("Invalid S scalar"); Some(VRFProof { Gamma: gamma, c, s }) } @@ -324,7 +326,7 @@ impl VRFProof { "FATAL ERROR: somehow constructed an invalid ECVRF proof" ); - let c_bytes = self.c.reduce().to_bytes(); + let c_bytes = self.c.to_bytes(); c_bytes_16[0..16].copy_from_slice(&c_bytes[0..16]); let gamma_bytes = self.Gamma.compress().to_bytes(); @@ -386,7 +388,7 @@ impl VRF { } let y = CompressedEdwardsY::from_slice(&hasher.finalize()[0..32]); - if let Some(h) = y.decompress() { + if let Some(h) = y.unwrap().decompress() { break h; } @@ -445,8 +447,7 @@ impl VRF { let mut h_32 = [0u8; 32]; h_32.copy_from_slice(&h[0..32]); - let x_scalar_raw = ed25519_Scalar::from_bits(h_32); - 
let x_scalar = x_scalar_raw.reduce(); // use the canonical scalar for the private key + let x_scalar = ed25519_Scalar::from_bytes_mod_order(clamp_integer(h_32)); trunc_hash.copy_from_slice(&h[32..64]); @@ -473,7 +474,7 @@ impl VRF { let mut scalar_buf = [0u8; 32]; scalar_buf[0..16].copy_from_slice(hash128); - ed25519_Scalar::from_bits(scalar_buf) + ed25519_Scalar::from_canonical_bytes(scalar_buf).expect("Invalid scalar") } /// ECVRF proof routine @@ -492,8 +493,7 @@ impl VRF { let c_hashbuf = VRF::hash_points(&H_point, &Gamma_point, &kB_point, &kH_point); let c_scalar = VRF::ed25519_scalar_from_hash128(&c_hashbuf); - let s_full_scalar = &k_scalar + &c_scalar * &x_scalar; - let s_scalar = s_full_scalar.reduce(); + let s_scalar = &k_scalar + &c_scalar * &x_scalar; // NOTE: expect() won't panic because c_scalar is guaranteed to have // its upper 16 bytes as 0 @@ -509,7 +509,7 @@ impl VRF { #[allow(clippy::op_ref)] pub fn verify(Y_point: &VRFPublicKey, proof: &VRFProof, alpha: &[u8]) -> Result { let H_point = VRF::hash_to_curve(Y_point, alpha); - let s_reduced = proof.s().reduce(); + let s_reduced = proof.s(); let Y_point_ed = CompressedEdwardsY(Y_point.to_bytes()) .decompress() .ok_or(Error::InvalidPublicKey)?; @@ -517,8 +517,8 @@ impl VRF { return Err(Error::InvalidPublicKey); } - let U_point = &s_reduced * &ED25519_BASEPOINT_POINT - proof.c() * Y_point_ed; - let V_point = &s_reduced * &H_point - proof.c() * proof.Gamma(); + let U_point = s_reduced * &ED25519_BASEPOINT_POINT - proof.c() * Y_point_ed; + let V_point = s_reduced * &H_point - proof.c() * proof.Gamma(); let c_prime_hashbuf = VRF::hash_points(&H_point, proof.Gamma(), &U_point, &V_point); let c_prime = VRF::ed25519_scalar_from_hash128(&c_prime_hashbuf); diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index cf0ae6c1f86..0145f0382d3 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -83,10 +83,6 @@ features = ["serde", "recovery"] [dependencies.ed25519-dalek] workspace = true -[dependencies.curve25519-dalek] -version = "=2.0.0" -features = ["serde"] - [dependencies.time] version = "0.2.23" features = ["std"] From 2dbc778cb3b3238e5755b12d1861a1c2c6b62231 Mon Sep 17 00:00:00 2001 From: Hugo C <911307+hugocaillard@users.noreply.github.com> Date: Fri, 21 Mar 2025 20:23:17 +0100 Subject: [PATCH 179/238] refactor: apply suggestions from code review Co-authored-by: Aaron Blankstein --- stacks-common/src/util/vrf.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index c8f18801282..781bf646fe7 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -282,8 +282,8 @@ impl VRFProof { // |----------------------------|----------|---------------------------| // Gamma point c scalar s scalar let gamma_opt = CompressedEdwardsY::from_slice(&bytes[0..32]) - .unwrap() - .decompress(); + .ok() + .and_then(|y| y.decompress()); if gamma_opt.is_none() { test_debug!("Invalid Gamma"); return None; @@ -299,8 +299,8 @@ impl VRFProof { c_buf[..16].copy_from_slice(&bytes[32..(16 + 32)]); s_buf[..32].copy_from_slice(&bytes[48..(32 + 48)]); - let c = ed25519_Scalar::from_canonical_bytes(c_buf).expect("Invalid C scalar"); - let s = ed25519_Scalar::from_canonical_bytes(s_buf).expect("Invalid S scalar"); + let c = ed25519_Scalar::from_canonical_bytes(c_buf).ok()?; + let s = ed25519_Scalar::from_canonical_bytes(s_buf).ok()?; Some(VRFProof { Gamma: gamma, c, s }) } @@ -388,7 +388,7 @@ impl VRF { } let y = 
CompressedEdwardsY::from_slice(&hasher.finalize()[0..32]); - if let Some(h) = y.unwrap().decompress() { + if let Some(h) = y.ok().and_then(|y| y.decompress()) { break h; } From 8c9d7281e74479f59d677037a7c7415f46a7276c Mon Sep 17 00:00:00 2001 From: Hugo CAILLARD <911307+hugocaillard@users.noreply.github.com> Date: Fri, 21 Mar 2025 21:00:22 +0100 Subject: [PATCH 180/238] refactor: fix vrfproof::from_slice --- stacks-common/src/util/vrf.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 781bf646fe7..bd7cecdeca8 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -299,10 +299,14 @@ impl VRFProof { c_buf[..16].copy_from_slice(&bytes[32..(16 + 32)]); s_buf[..32].copy_from_slice(&bytes[48..(32 + 48)]); - let c = ed25519_Scalar::from_canonical_bytes(c_buf).ok()?; - let s = ed25519_Scalar::from_canonical_bytes(s_buf).ok()?; - - Some(VRFProof { Gamma: gamma, c, s }) + let c: Option<ed25519_Scalar> = ed25519_Scalar::from_canonical_bytes(c_buf).into(); + let s: Option<ed25519_Scalar> = ed25519_Scalar::from_canonical_bytes(s_buf).into(); + + Some(VRFProof { + Gamma: gamma, + c: c?, + s: s?, + }) } _ => None, } } From c2df444e776fd0f867715b71805970749702e4d6 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 21 Mar 2025 17:56:36 -0500 Subject: [PATCH 181/238] more test cleanup * follower_bootup_across_multiple_cycles at least fails faster now: the failure scenario for this test involved not making it to nakamoto blocks; this puts a tighter timeout on that aspect of the test. * block_proposal_max_age_rejections: make the message assertions specifically about the proposed test blocks, not any proposals that the miner might otherwise be trying to submit. --- .../src/tests/nakamoto_integrations.rs | 27 ++++++- testnet/stacks-node/src/tests/signer/v0.rs | 74 ++++++++++--------- 2 files changed, 64 insertions(+), 37 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b6b4bb76451..4c2053a335f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4101,8 +4101,31 @@ fn follower_bootup_across_multiple_cycles() { debug!("Booted follower-thread"); - // Wait a long time for the follower to catch up because CI is slow. 
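The replacement below splits this single 600-second wait into a quick 120-second check that the follower reaches any Nakamoto block at all, followed by a 480-second catch-up wait, so the common failure mode surfaces sooner. Both are built on `wait_for`, the suite's poll-until-true helper; a minimal sketch of that pattern (an illustrative reimplementation, not the actual test utility):

```rust
use std::time::{Duration, Instant};

// Minimal poll-until-true helper in the spirit of the tests' `wait_for`:
// re-evaluate `check` until it returns Ok(true), propagating closure errors,
// and fail with a timeout error once `timeout_secs` have elapsed.
fn wait_for_condition<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
where
    F: FnMut() -> Result<bool, String>,
{
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        std::thread::sleep(Duration::from_millis(500));
    }
    Err("timed out waiting for condition".into())
}
```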
- wait_for(600, || { + // Wait some time for the follower to at least get some nakamoto blocks + wait_for(120, || { + thread::sleep(Duration::from_secs(5)); + let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else { + return Ok(false); + }; + + let block_id = StacksBlockId::new( + &follower_node_info.stacks_tip_consensus_hash, + &follower_node_info.stacks_tip, + ); + let tip = NakamotoChainState::get_block_header(chainstate.db(), &block_id) + .unwrap() + .unwrap(); + info!( + "Latest follower tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + Ok(tip.anchored_header.as_stacks_nakamoto().is_some()) + }) + .unwrap(); + + wait_for(480, || { sleep_ms(1000); let Ok(follower_node_info) = get_chain_info_result(&follower_conf) else { return Ok(false); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7097f0d3a74..88abdbca33f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -8163,47 +8163,51 @@ fn block_proposal_max_age_rejections() { info!("------------------------- Test Block Proposal Rejected -------------------------"); // Verify the signers rejected only the SECOND block proposal. The first was not even processed. - wait_for(30, || { - let rejections = test_observer::get_stackerdb_chunks() + wait_for(120, || { + let mut status_map = HashMap::new(); + for chunk in test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) - .map(|chunk| { - let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - return None; - }; - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - signer_signature_hash, - signature, - .. - })) => { - assert_eq!( - signer_signature_hash, block_signer_signature_hash_2, - "We should only reject the second block" - ); - Some(signature) - } - SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { - signer_signature_hash, - .. - })) => { - assert_ne!( - signer_signature_hash, block_signer_signature_hash_1, - "We should never have accepted block" - ); - None - } - _ => None, + { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signer_signature_hash, + .. + })) => { + let entry = status_map.entry(signer_signature_hash).or_insert((0, 0)); + entry.0 += 1; } - }); - Ok(rejections.count() > num_signers * 7 / 10) + SignerMessage::BlockResponse(BlockResponse::Accepted(BlockAccepted { + signer_signature_hash, + .. 
+ })) => { + let entry = status_map.entry(signer_signature_hash).or_insert((0, 0)); + entry.1 += 1; + } + _ => continue, + } + } + let block_1_status = status_map + .get(&block_signer_signature_hash_1) + .cloned() + .unwrap_or((0, 0)); + assert_eq!(block_1_status, (0, 0)); + + let block_2_status = status_map + .get(&block_signer_signature_hash_2) + .cloned() + .unwrap_or((0, 0)); + assert_eq!(block_2_status.1, 0, "Block 2 should always be rejected"); + + info!("Block 2 status"; "accepted" => block_2_status.1, "rejected" => block_2_status.0); + Ok(block_2_status.0 > num_signers * 7 / 10) }) .expect("Timed out waiting for block rejections"); - info!("------------------------- Test Peer Info-------------------------"); - assert_eq!(info_before, get_chain_info(&signer_test.running_nodes.conf)); - info!("------------------------- Test Shutdown-------------------------"); signer_test.shutdown(); } From aabae16af5f9c5a6de2752d849a74eb1dc850463 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Fri, 21 Mar 2025 13:46:17 +0100 Subject: [PATCH 182/238] fix: properly configure lf eol management in .gitattributes, #4613 --- .gitattributes | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index 81cf2d82850..e5f2cf06c8d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,6 @@ legacy/* linguist-vendored -* text=lf +# Enforcing 'lf' eol mainly for: +# - 'stx-genesis' package, where txt files need hash computation and comparison +# - 'clarity' package, where the Clarity language is sensitive to line endings for .clar files +# anyway, setting eol for all text files gives homogeneous line-ending management over the whole code base +* text eol=lf From 9609f536001f6e65fc07fcede5c72092c08a8feb Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Sun, 23 Mar 2025 16:02:29 +0100 Subject: [PATCH 183/238] chore: configure vscode default files eol to LF, #4613 --- .vscode/settings.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index a52a694e333..4435fc28484 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -7,5 +7,6 @@ }, "rust-analyzer.rustfmt.extraArgs": [ "+nightly" - ] + ], + "files.eol": "\n" } \ No newline at end of file From a3cdbd52d6d8d207ce4a4e28166669c849f6ad8b Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Sun, 23 Mar 2025 17:29:51 +0100 Subject: [PATCH 184/238] chore: update readme, #4613 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5e0aa26dbea..9f8f38c4d13 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ _Warning, this typically takes a few minutes_ cargo nextest run ``` +_On Windows, many tests will fail, mainly due to parallelism. To mitigate the issue you may need to run the tests individually._ + ## Run the testnet You can observe the state machine in action locally by running: ``` cargo run --bin stacks-node -- start --config ./sample/conf/testnet-follower-conf.toml ``` -_On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. 
This is due to the Clarity language currently being sensitive to line endings._ Additional testnet documentation is available [here](./docs/testnet.md) and [here](https://docs.stacks.co/docs/nodes-and-miners/miner-testnet) ## Release Process From 19a27d5cf0e1892e723e51d03958e831c061bd28 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 24 Mar 2025 06:45:55 +0100 Subject: [PATCH 185/238] renamed to execute_with_limited_execution_time --- clarity/src/vm/mod.rs | 2 +- clarity/src/vm/tests/simple_apply_eval.rs | 8 ++++---- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index c274a5c893b..7f7f0d1541f 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -607,7 +607,7 @@ pub fn execute(program: &str) -> Result<Option<Value>> { /// Execute for test in Clarity1, Epoch20, testnet. #[cfg(any(test, feature = "testing"))] -pub fn execute_with_max_execution_time( +pub fn execute_with_limited_execution_time( program: &str, max_execution_time: std::time::Duration, ) -> Result<Option<Value>> { diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index c8049ec2ca2..59f08437545 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ b/clarity/src/vm/tests/simple_apply_eval.rs @@ -40,9 +40,9 @@ use crate::vm::types::{ }; use crate::vm::{ eval, execute as vm_execute, execute_v2 as vm_execute_v2, - execute_with_max_execution_time as vm_execute_with_max_execution_time, execute_with_parameters, - CallStack, ClarityVersion, ContractContext, CostErrors, Environment, GlobalContext, - LocalContext, Value, + execute_with_limited_execution_time as vm_execute_with_limited_execution_time, + execute_with_parameters, CallStack, ClarityVersion, ContractContext, CostErrors, Environment, + GlobalContext, LocalContext, Value, }; #[test] @@ -1771,7 +1771,7 @@ fn test_chain_id() { #[test] fn test_execution_time_expiration() { assert_eq!( - vm_execute_with_max_execution_time("(+ 1 1)", Duration::from_secs(0)) + vm_execute_with_limited_execution_time("(+ 1 1)", Duration::from_secs(0)) .err() .unwrap(), CostErrors::ExecutionTimeExpired.into() diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 39e718c73fb..3f92aeb5df3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12554,8 +12554,8 @@ fn miner_rejection_by_contract_call_execution_time_expired() { signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800); signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800); }, - |config| config.miner.max_execution_time = Some(0), - |config| config.miner.max_execution_time = None, + |config| config.miner.max_execution_time_secs = Some(0), + |config| config.miner.max_execution_time_secs = None, ); let rl1_skip_commit_op = miners .signer_test From 03178f5201f5f149274bd2e089712df76c1c1f35 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Mar 2025 09:09:59 -0500 Subject: [PATCH 186/238] remove unnecessary extern crate --- stackslib/src/main.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index d5866507c8f..6dd0a316cd6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -22,8 +22,6 @@ #[macro_use] extern crate stacks_common; -extern crate slog; - #[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] use tikv_jemallocator::Jemalloc; From 
4b1b020bf72bf15f3ec6cab6de1f13b310101bbd Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Thu, 20 Mar 2025 13:20:51 +0100 Subject: [PATCH 187/238] consolidate label for signer_signature_hash --- libsigner/src/v0/messages.rs | 4 +- stacks-signer/src/chainstate.rs | 38 +++++++++---------- stacks-signer/src/client/stacks_client.rs | 6 +-- stacks-signer/src/signerdb.rs | 6 +-- stacks-signer/src/v0/signer.rs | 30 +++++++-------- stackslib/src/net/api/postblock_proposal.rs | 2 +- .../stacks-node/src/nakamoto_node/miner.rs | 14 +++---- .../src/nakamoto_node/signer_coordinator.rs | 6 +-- .../src/nakamoto_node/stackerdb_listener.rs | 12 +++--- .../src/tests/nakamoto_integrations.rs | 26 +++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 11 files changed, 75 insertions(+), 71 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7103ee98386..351c6967694 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -873,14 +873,14 @@ impl std::fmt::Display for BlockResponse { BlockResponse::Accepted(a) => { write!( f, - "BlockAccepted: signer_sighash = {}, signature = {}, version = {}", + "BlockAccepted: signer_signature_hash = {}, signature = {}, version = {}", a.signer_signature_hash, a.signature, a.metadata.server_version ) } BlockResponse::Rejected(r) => { write!( f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}, signature = {}, version = {}", + "BlockRejected: signer_signature_hash = {}, code = {}, reason = {}, signature = {}, version = {}", r.reason_code, r.reason, r.signer_signature_hash, r.signature, r.metadata.server_version ) } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 8654450738a..73456409983 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -284,7 +284,7 @@ impl SortitionsView { warn!( "Miner block proposal has bitvec field which punishes in disagreement with signer. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -322,7 +322,7 @@ impl SortitionsView { warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -333,7 +333,7 @@ impl SortitionsView { warn!( "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. 
Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_pubkey" => &block_pk.to_hex(), "proposed_block_pubkey_hash" => %block_pkh, "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, @@ -348,7 +348,7 @@ impl SortitionsView { warn!( "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), ); return Err(RejectReason::InvalidMiner); } @@ -362,7 +362,7 @@ impl SortitionsView { warn!( "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_miner_status" => ?self.cur_sortition.miner_status, "last_sortition" => %last_sortition.consensus_hash ); @@ -407,7 +407,7 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "extend_timestamp" => extend_timestamp, "epoch_time" => epoch_time, ); @@ -435,7 +435,7 @@ impl SortitionsView { info!( "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "sortition_state.consensus_hash" => %sortition_state.consensus_hash, "sortition_state.prior_sortition" => %sortition_state.prior_sortition, "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, @@ -449,7 +449,7 @@ impl SortitionsView { if tenures_reorged.is_empty() { warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. 
Marking miner invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), ); return Ok(false); } @@ -471,7 +471,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already more than one globally accepted block."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -488,7 +488,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks, and there is no local knowledge for that tenure's block timing."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -515,7 +515,7 @@ impl SortitionsView { info!( "Miner is not building off of most recent tenure. A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, @@ -537,7 +537,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -612,7 +612,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => info.block.header.chain_length + 1, ); @@ -641,7 +641,7 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure change, but failed to fetch the tenure tip for the parent tenure: {e:?}. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %tenure_change.prev_tenure_consensus_hash, ); return Ok(false); @@ -669,7 +669,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => tip_height + 1, ); @@ -722,8 +722,8 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_current_tenure.block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "last_in_tenure_signer_signature_hash" => %last_in_current_tenure.block.header.signer_signature_hash(), ); return Err(RejectReason::DuplicateBlockFound); } @@ -741,7 +741,7 @@ impl SortitionsView { info!( "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, ); return Ok(true); @@ -752,7 +752,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => last_known_block.block.header.chain_length + 1, ); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index db0b356fb40..51ee042b7bf 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -315,7 +315,7 @@ impl StacksClient { /// Submit the block proposal to the stacks node. The block will be validated and returned via the HTTP endpoint for Block events. pub fn submit_block_for_validation(&self, block: NakamotoBlock) -> Result<(), ClientError> { debug!("StacksClient: Submitting block for validation"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -599,7 +599,7 @@ impl StacksClient { /// In tests, this panics if the retry takes longer than 30 seconds. 
pub fn post_block_until_ok(&self, log_fmt: &F, block: &NakamotoBlock) -> bool { debug!("StacksClient: Posting block to stacks node"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); @@ -629,7 +629,7 @@ impl StacksClient { /// was rejected. pub fn post_block(&self, block: &NakamotoBlock) -> Result { debug!("StacksClient: Posting block to the stacks node"; - "signer_sighash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "block_id" => %block.header.block_id(), "block_height" => %block.header.chain_length, ); diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 5d166a3ecfb..228935099b3 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -963,7 +963,7 @@ impl SignerDb { debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, - "sighash" => %hash, + "signer_signature_hash" => %hash, "block_id" => %block_id, "signed" => %signed_over, "broadcasted" => ?broadcasted, @@ -1016,7 +1016,7 @@ impl SignerDb { ]; debug!("Inserting block signature."; - "sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signature" => %signature); self.db.execute(qry, args)?; @@ -1052,7 +1052,7 @@ impl SignerDb { ]; debug!("Inserting block rejection."; - "block_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_address" => %addr, "reject_reason" => %reject_reason ); diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2decd05dc16..aae9ae731c3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -214,7 +214,7 @@ impl SignerTrait for Signer { let Some(miner_pubkey) = block_proposal.block.header.recover_miner_pk() else { warn!("{self}: Failed to recover miner pubkey"; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "consensus_hash" => %block_proposal.block.header.consensus_hash); continue; }; @@ -232,7 +232,7 @@ impl SignerTrait for Signer { "{self}: Got block pushed message"; "block_id" => %b.block_id(), "block_height" => b.header.chain_length, - "signer_sighash" => %b.header.signer_signature_hash(), + "signer_signature_hash" => %b.header.signer_signature_hash(), ); #[cfg(any(test, feature = "testing"))] if self.test_skip_block_broadcast(b) { @@ -432,7 +432,7 @@ impl Signer { .inspect_err(|e| { warn!( "{self}: Failed to update sortition view: {e:?}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ) }) @@ -452,7 +452,7 @@ impl Signer { Err(RejectReason::ConnectivityIssues(e)) => { warn!( "{self}: Error checking block proposal: {e}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ); Some(self.create_block_rejection(RejectReason::ConnectivityIssues(e), block)) @@ -461,7 +461,7 @@ impl Signer { Err(reject_code) => { warn!( "{self}: Block proposal invalid"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, "reject_reason" => %reject_code, "reject_code" => ?reject_code, @@ -474,7 +474,7 @@ impl Signer { } else { warn!( "{self}: 
Cannot validate block, no sortition view"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_id, ); Some(self.create_block_rejection(RejectReason::NoSortitionView, block)) @@ -547,7 +547,7 @@ impl Signer { { // Block is too old. Drop it with a warning. Don't even bother broadcasting to the node. warn!("{self}: Received a block proposal that is more than {} secs old. Ignoring...", self.block_proposal_max_age_secs; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -576,7 +576,7 @@ impl Signer { info!( "{self}: received a block proposal for a new block."; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -595,7 +595,7 @@ impl Signer { .inspect_err(|e| { warn!( "{self}: Failed to update sortition view: {e:?}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), ) }) @@ -625,7 +625,7 @@ impl Signer { // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. info!( "{self}: submitting block proposal for validation"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, "burn_height" => block_proposal.burn_height, @@ -730,7 +730,7 @@ impl Signer { } Err(e) => { warn!("{self}: Error checking block proposal: {e}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %proposed_block.block_id() ); return Some(self.create_block_rejection( @@ -753,7 +753,7 @@ impl Signer { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %proposed_block_consensus_hash, - "proposed_block_signer_sighash" => %signer_signature_hash, + "proposed_block_signer_signature_hash" => %signer_signature_hash, "proposed_chain_length" => proposed_block.header.chain_length, "expected_at_least" => last_block_info.block.header.chain_length + 1, ); @@ -766,7 +766,7 @@ impl Signer { Ok(_) => {} Err(e) => { warn!("{self}: Failed to check block against signer db: {e}"; - "signer_sighash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "block_id" => %proposed_block.block_id() ); return Some(self.create_block_rejection( @@ -1011,7 +1011,7 @@ impl Signer { // This is weird. If this is reached, its probably an error in code logic or the db was flushed. // Why are we tracking a block submission for a block we have never seen / stored before. error!("{self}: tracking an unknown block validation submission."; - "signer_sighash" => %proposal_signer_sighash, + "signer_signature_hash" => %proposal_signer_sighash, ); return; } @@ -1024,7 +1024,7 @@ impl Signer { // Reject it so we aren't holding up the network because of our inaction. warn!( "{self}: Failed to receive block validation response within {} ms. 
Rejecting block.", self.block_proposal_validation_timeout.as_millis(); - "signer_sighash" => %proposal_signer_sighash, + "signer_signature_hash" => %proposal_signer_sighash, ); let rejection = self.create_block_rejection( RejectReason::ConnectivityIssues( diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 7047eba6109..73c75924d6d 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -753,7 +753,7 @@ impl RPCRequestHandler for RPCBlockProposalRequestHandler { info!( "Received block proposal request"; - "signer_sighash" => %block_proposal.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_proposal.block.header.signer_signature_hash(), "block_header_hash" => %block_proposal.block.header.block_hash(), "height" => block_proposal.block.header.chain_length, "tx_count" => block_proposal.block.txs.len(), diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7155cf5966b..58ec06f2508 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -470,7 +470,7 @@ impl BlockMinerThread { }; error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -583,7 +583,7 @@ impl BlockMinerThread { Err(e) => match e { NakamotoNodeError::StacksTipChanged => { info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -591,7 +591,7 @@ impl BlockMinerThread { } NakamotoNodeError::BurnchainTipChanged => { info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -600,7 +600,7 @@ impl BlockMinerThread { NakamotoNodeError::StackerDBUploadError(ref ack) => { if ack.code == Some(StackerDBErrorCodes::BadSigner.code()) { error!("Error while gathering signatures: failed to upload miner StackerDB data: {ack:?}. 
Giving up."; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); @@ -624,7 +624,7 @@ impl BlockMinerThread { } else { info!( "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), + "signer_signature_hash" => %new_block.header.signer_signature_hash(), "stacks_block_hash" => %new_block.header.block_hash(), "stacks_block_id" => %new_block.header.block_id(), "block_height" => new_block.header.chain_length, @@ -1267,7 +1267,7 @@ impl BlockMinerThread { self.config .make_nakamoto_block_builder_settings(self.globals.get_miner_status()), // we'll invoke the event dispatcher ourselves so that it calculates the - // correct signer_sighash for `process_mined_nakamoto_block_event` + // correct signer_signature_hash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), signer_bitvec_len.unwrap_or(0), ) @@ -1301,7 +1301,7 @@ impl BlockMinerThread { block_metadata.block.header.chain_length, block_metadata.block.header.block_hash(), block_metadata.block.txs.len(); - "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "signer_signature_hash" => %block_metadata.block.header.signer_signature_hash(), "consensus_hash" => %block_metadata.block.header.consensus_hash, "parent_block_id" => %block_metadata.block.header.parent_block_id, "timestamp" => block_metadata.block.header.timestamp, diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index b705fb4ddad..f95ec03d49f 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -372,7 +372,7 @@ impl SignerCoordinator { warn!( "Failed to query chainstate for block: {e:?}"; "block_id" => %block_id, - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); e }) @@ -447,13 +447,13 @@ impl SignerCoordinator { info!( "{}/{} signers vote to reject block", block_status.total_weight_rejected, self.total_weight; - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); counters.bump_naka_rejected_blocks(); return Err(NakamotoNodeError::SignersRejected); } else if block_status.total_weight_approved >= self.weight_threshold { info!("Received enough signatures, block accepted"; - "block_signer_sighash" => %block_signer_sighash, + "signer_signature_hash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); } else if rejections_timer.elapsed() > *rejections_timeout { diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 42811cd7842..ef2d6bdec99 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -285,7 +285,7 @@ impl StackerDBListener { info!( "StackerDBListener: Received signature for block that we did not request. 
Ignoring."; "signature" => %signature, - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "slot_id" => slot_id, "signer_set" => self.signer_set, ); @@ -303,7 +303,7 @@ impl StackerDBListener { warn!( "StackerDBListener: Processed signature but didn't validate over the expected block. Ignoring"; "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "slot_id" => slot_id, ); continue; @@ -311,7 +311,7 @@ impl StackerDBListener { if Self::fault_injection_ignore_signatures() { warn!("StackerDBListener: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, @@ -332,7 +332,7 @@ impl StackerDBListener { } info!("StackerDBListener: Signature Added to block"; - "block_signer_sighash" => %block_sighash, + "signer_signature_hash" => %block_sighash, "signer_pubkey" => signer_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %signature, @@ -368,7 +368,7 @@ impl StackerDBListener { else { info!( "StackerDBListener: Received rejection for block that we did not request. Ignoring."; - "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_signature_hash" => %rejected_data.signer_signature_hash, "slot_id" => slot_id, "signer_set" => self.signer_set, ); @@ -397,7 +397,7 @@ impl StackerDBListener { } info!("StackerDBListener: Signer rejected block"; - "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_signature_hash" => %rejected_data.signer_signature_hash, "signer_pubkey" => rejected_pubkey.to_hex(), "signer_slot_id" => slot_id, "signature" => %rejected_data.signature, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b6b4bb76451..8f75b6d6be2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -403,15 +403,15 @@ pub fn blind_signer_multinode( match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { - info!("Already signed block, will sleep and try again"; "signer_sig_hash" => signed_block.to_hex()); + info!("Already signed block, will sleep and try again"; "signer_signature_hash" => signed_block.to_hex()); thread::sleep(Duration::from_secs(5)); match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { - info!("Already signed block, ignoring"; "signer_sig_hash" => signed_block.to_hex()); + info!("Already signed block, ignoring"; "signer_signature_hash" => signed_block.to_hex()); continue; } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + info!("Signed block"; "signer_signature_hash" => signed_block.to_hex()); signed_blocks.insert(signed_block); } Err(e) => { @@ -420,7 +420,7 @@ pub fn blind_signer_multinode( }; continue; } - info!("Signed block"; "signer_sig_hash" => signed_block.to_hex()); + info!("Signed block"; "signer_signature_hash" => signed_block.to_hex()); signed_blocks.insert(signed_block); } Err(e) => { @@ -482,7 +482,11 @@ pub fn get_latest_block_proposal( }); for (b, _, is_latest) in proposed_blocks.iter() { - info!("Consider block"; "signer_sighash" => 
%b.header.signer_signature_hash(), "is_latest_sortition" => is_latest, "chain_height" => b.header.chain_length); + info!("Consider block"; + "signer_signature_hash" => %b.header.signer_signature_hash(), + "is_latest_sortition" => is_latest, + "chain_height" => b.header.chain_length + ); } let Some((proposed_block, miner_addr, _)) = proposed_blocks.pop() else { @@ -540,20 +544,20 @@ pub fn read_and_sign_block_proposal( }) .collect(); let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); - let signer_sig_hash = proposed_block.header.signer_signature_hash(); + let signer_signature_hash = proposed_block.header.signer_signature_hash(); let other_views = other_views_result?; if !other_views.is_empty() { info!( "Fetched block proposals"; - "primary_latest_signer_sighash" => %signer_sig_hash, + "primary_latest_signer_signature_hash" => %signer_signature_hash, "primary_latest_block_height" => proposed_block.header.chain_length, "other_views" => ?other_views, ); } - if signed_blocks.contains(&signer_sig_hash) { + if signed_blocks.contains(&signer_signature_hash) { // already signed off on this block, don't sign again. - return Ok(signer_sig_hash); + return Ok(signer_signature_hash); } let reward_set = load_nakamoto_reward_set( @@ -576,7 +580,7 @@ pub fn read_and_sign_block_proposal( info!( "Fetched proposed block from .miners StackerDB"; "proposed_block_hash" => &proposed_block_hash, - "signer_sig_hash" => &signer_sig_hash.to_hex(), + "signer_signature_hash" => &signer_signature_hash.to_hex(), ); signers.sign_block_with_reward_set(&mut proposed_block, &reward_set); @@ -584,7 +588,7 @@ pub fn read_and_sign_block_proposal( channel .send(proposed_block.header.signer_signature) .unwrap(); - Ok(signer_sig_hash) + Ok(signer_signature_hash) } /// Return a working nakamoto-neon config and the miner's bitcoin address to fund diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index edfdd5f7de6..019112c3f08 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7569,7 +7569,7 @@ fn block_validation_pending_table() { .expect("Failed to get pending block validations"); info!( "----- Waiting for pending block proposal in SignerDB -----"; - "proposed_signer_signature_hash" => block_signer_signature_hash.to_hex(), + "proposed_block_signer_signature_hash" => block_signer_signature_hash.to_hex(), "pending_block_validations_len" => pending_block_validations.len(), "pending_block_validations" => pending_block_validations.iter() .map(|p| p.signer_signature_hash.to_hex()) From cef89379cdeb342153a7b91a1570be9251c4c9be Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 24 Mar 2025 12:47:36 +0100 Subject: [PATCH 188/238] remove prefix from signer_signature_hash logs --- stacks-signer/src/chainstate.rs | 34 +++++++++---------- .../src/tests/nakamoto_integrations.rs | 2 +- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 73456409983..93757368194 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -284,7 +284,7 @@ impl SortitionsView { warn!( "Miner block proposal has bitvec field which punishes in disagreement with signer. 
Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -322,7 +322,7 @@ impl SortitionsView { warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); @@ -333,7 +333,7 @@ impl SortitionsView { warn!( "Miner block proposal pubkey does not match the winning pubkey hash for its sortition. Considering invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_pubkey" => &block_pk.to_hex(), "proposed_block_pubkey_hash" => %block_pkh, "sortition_winner_pubkey_hash" => %proposed_by.state().miner_pkh, @@ -348,7 +348,7 @@ impl SortitionsView { warn!( "Current miner behaved improperly, this signer views the miner as invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), ); return Err(RejectReason::InvalidMiner); } @@ -362,7 +362,7 @@ impl SortitionsView { warn!( "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "current_sortition_miner_status" => ?self.cur_sortition.miner_status, "last_sortition" => %last_sortition.consensus_hash ); @@ -407,7 +407,7 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "extend_timestamp" => extend_timestamp, "epoch_time" => epoch_time, ); @@ -435,7 +435,7 @@ impl SortitionsView { info!( "Most recent miner's tenure does not build off the prior sortition, checking if this is valid behavior"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "sortition_state.consensus_hash" => %sortition_state.consensus_hash, "sortition_state.prior_sortition" => %sortition_state.prior_sortition, "sortition_state.parent_tenure_id" => %sortition_state.parent_tenure_id, @@ -449,7 +449,7 @@ impl SortitionsView { if tenures_reorged.is_empty() { warn!("Miner is not building off of most recent tenure, but stacks node was unable to return information about the relevant sortitions. Marking miner invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), ); return Ok(false); } @@ -471,7 +471,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already more than one globally accepted block."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -488,7 +488,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks, and there is no local knowledge for that tenure's block timing."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -515,7 +515,7 @@ impl SortitionsView { info!( "Miner is not building off of most recent tenure. 
A tenure they reorg has already mined blocks, but the block was poorly timed, allowing the reorg."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, @@ -537,7 +537,7 @@ impl SortitionsView { warn!( "Miner is not building off of most recent tenure, but a tenure they attempted to reorg has already mined blocks."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %sortition_state.parent_tenure_id, "last_sortition" => %sortition_state.prior_sortition, "violating_tenure_id" => %tenure.consensus_hash, @@ -612,7 +612,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => info.block.header.chain_length + 1, ); @@ -641,7 +641,7 @@ impl SortitionsView { warn!( "Miner block proposal contains a tenure change, but failed to fetch the tenure tip for the parent tenure: {e:?}. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "parent_tenure" => %tenure_change.prev_tenure_consensus_hash, ); return Ok(false); @@ -669,7 +669,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => tip_height + 1, ); @@ -741,7 +741,7 @@ impl SortitionsView { info!( "Have no accepted blocks in the tenure, assuming block confirmation is correct"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_block_height" => block.header.chain_length, ); return Ok(true); @@ -752,7 +752,7 @@ impl SortitionsView { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, - "proposed_block_signer_signature_hash" => %block.header.signer_signature_hash(), + "signer_signature_hash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, "expected_at_least" => last_known_block.block.header.chain_length + 1, ); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8f75b6d6be2..6a116f94814 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ 
b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -549,7 +549,7 @@ pub fn read_and_sign_block_proposal( if !other_views.is_empty() { info!( "Fetched block proposals"; - "primary_latest_signer_signature_hash" => %signer_signature_hash, + "signer_signature_hash" => %signer_signature_hash, "primary_latest_block_height" => proposed_block.header.chain_length, "other_views" => ?other_views, ); From 6bc129f59e50dbff47c13f3cda2eebad1a1c1fee Mon Sep 17 00:00:00 2001 From: Francesco Leacche Date: Mon, 24 Mar 2025 16:45:42 +0100 Subject: [PATCH 189/238] block_hash -> signer_signature_hash. last signer_sighash after rebase --- stacks-signer/src/v0/signer.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index aae9ae731c3..603840e8394 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -291,7 +291,7 @@ impl SignerTrait for Signer { } => { debug!( "{self}: Received a new block event."; - "block_hash" => %block_hash, + "signer_signature_hash" => %block_hash, "block_height" => block_height ); if let Ok(Some(mut block_info)) = self @@ -661,7 +661,7 @@ impl Signer { // We are still waiting for a response for this block. Do nothing. debug!( "{self}: Received a block proposal for a block we are already validating."; - "signer_sighash" => %block_info.signer_signature_hash(), + "signer_signature_hash" => %block_info.signer_signature_hash(), "block_id" => %block_info.block.block_id() ); return; @@ -1117,7 +1117,7 @@ impl Signer { // recover public key let Ok(public_key) = rejection.recover_public_key() else { debug!("{self}: Received block rejection with an unrecovarable signature. Will not store."; - "block_hash" => %block_hash, + "signer_signature_hash" => %block_hash, "signature" => %signature ); return; @@ -1133,7 +1133,7 @@ impl Signer { if !is_valid_sig { debug!("{self}: Receive block rejection with an invalid signature. Will not store."; - "block_hash" => %block_hash, + "signer_signature_hash" => %block_hash, "signature" => %signature ); return; @@ -1235,7 +1235,7 @@ impl Signer { else { debug!("{self}: Received unrecovarable signature. 
Will not store."; "signature" => %signature, - "block_hash" => %block_hash); + "signer_signature_hash" => %block_hash); return; }; From b9a6895bbd60ec2c845c791d45a8a4f7516cd115 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Mar 2025 11:36:44 -0500 Subject: [PATCH 190/238] replace expects/unwraps with fallible vrf routines --- stacks-common/src/util/vrf.rs | 22 +++++++----- stackslib/src/burnchains/tests/mod.rs | 2 +- stackslib/src/chainstate/coordinator/tests.rs | 4 +-- .../src/chainstate/nakamoto/tests/mod.rs | 2 +- testnet/stacks-node/src/keychain.rs | 36 +++++++++++++++---- .../stacks-node/src/nakamoto_node/miner.rs | 11 ++++++ testnet/stacks-node/src/neon_node.rs | 11 ++++++ testnet/stacks-node/src/node.rs | 14 +++++--- 8 files changed, 80 insertions(+), 22 deletions(-) diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index bd7cecdeca8..bd124a5da0d 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -181,6 +181,7 @@ impl VRFPublicKey { pub enum Error { InvalidPublicKey, InvalidDataError, + InvalidHashPoints, OSRNGError(rand::Error), } @@ -189,6 +190,7 @@ impl fmt::Display for Error { match *self { Error::InvalidPublicKey => write!(f, "Invalid public key"), Error::InvalidDataError => write!(f, "No data could be found"), + Error::InvalidHashPoints => write!(f, "VRF hash points did not yield a valid scalar"), Error::OSRNGError(ref e) => fmt::Display::fmt(e, f), } } @@ -199,6 +201,7 @@ impl error::Error for Error { match *self { Error::InvalidPublicKey => None, Error::InvalidDataError => None, + Error::InvalidHashPoints => None, Error::OSRNGError(ref e) => Some(e), } } @@ -474,17 +477,17 @@ impl VRF { /// Convert a 16-byte string into a scalar. /// The upper 16 bytes in the resulting scalar MUST BE 0's - fn ed25519_scalar_from_hash128(hash128: &[u8; 16]) -> ed25519_Scalar { + fn ed25519_scalar_from_hash128(hash128: &[u8; 16]) -> Option<ed25519_Scalar> { let mut scalar_buf = [0u8; 32]; scalar_buf[0..16].copy_from_slice(hash128); - ed25519_Scalar::from_canonical_bytes(scalar_buf).expect("Invalid scalar") + ed25519_Scalar::from_canonical_bytes(scalar_buf).into() } /// ECVRF proof routine /// https://tools.ietf.org/id/draft-irtf-cfrg-vrf-02.html#rfc.section.5.1 #[allow(clippy::op_ref)] - pub fn prove(secret: &VRFPrivateKey, alpha: &[u8]) -> VRFProof { + pub fn prove(secret: &VRFPrivateKey, alpha: &[u8]) -> Option<VRFProof> { let (Y_point, x_scalar, trunc_hash) = VRF::expand_privkey(secret); let H_point = VRF::hash_to_curve(&Y_point, alpha); @@ -495,14 +498,15 @@ impl VRF { let kH_point = &k_scalar * &H_point; let c_hashbuf = VRF::hash_points(&H_point, &Gamma_point, &kB_point, &kH_point); - let c_scalar = VRF::ed25519_scalar_from_hash128(&c_hashbuf); + let c_scalar = VRF::ed25519_scalar_from_hash128(&c_hashbuf)?; let s_scalar = &k_scalar + &c_scalar * &x_scalar; // NOTE: expect() won't panic because c_scalar is guaranteed to have // its upper 16 bytes as 0 VRFProof::new(Gamma_point, c_scalar, s_scalar) - .expect("FATAL ERROR: upper-16 bytes of proof's C scalar are NOT 0") + .inspect_err(|e| error!("FATAL: upper-16 bytes of proof's C scalar are NOT 0: {e}")) + .ok() } /// Given a public key, verify that the private key owner that generated the ECVRF proof did so on the given message.
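A minimal sketch of how these now-fallible routines compose end to end, mirroring the updated unit tests further down in this diff; the `prove_and_verify` helper is illustrative only:

```rust
use stacks_common::util::vrf::{VRFPrivateKey, VRFPublicKey, VRF};

// Prove-then-verify round trip under the fallible API: `prove` returns
// Option<VRFProof> (None when the hashed challenge is not a canonical
// scalar), and `verify` can now fail with Error::InvalidHashPoints.
fn prove_and_verify(msg: &[u8]) -> bool {
    let sk = VRFPrivateKey::new();
    let pk = VRFPublicKey::from_private(&sk);
    let Some(proof) = VRF::prove(&sk, msg) else {
        // Callers such as miners treat this as "unable to mine"
        // rather than panicking.
        return false;
    };
    VRF::verify(&pk, &proof, msg).unwrap_or(false)
}
```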
@@ -525,7 +529,9 @@ impl VRF { let V_point = s_reduced * &H_point - proof.c() * proof.Gamma(); let c_prime_hashbuf = VRF::hash_points(&H_point, proof.Gamma(), &U_point, &V_point); - let c_prime = VRF::ed25519_scalar_from_hash128(&c_prime_hashbuf); + let Some(c_prime) = VRF::ed25519_scalar_from_hash128(&c_prime_hashbuf) else { + return Err(Error::InvalidHashPoints); + }; // NOTE: this leverages constant-time comparison inherited from the Scalar impl Ok(c_prime == *(proof.c())) @@ -587,7 +593,7 @@ mod tests { let privk = VRFPrivateKey::from_bytes(&proof_fixture.privkey[..]).unwrap(); let expected_proof_bytes = &proof_fixture.proof[..]; - let proof = VRF::prove(&privk, &alpha.to_vec()); + let proof = VRF::prove(&privk, &alpha.to_vec()).unwrap(); let proof_bytes = proof.to_bytes(); assert_eq!(proof_bytes.to_vec(), expected_proof_bytes.to_vec()); @@ -609,7 +615,7 @@ mod tests { let mut msg = [0u8; 1024]; rng.fill_bytes(&mut msg); - let proof = VRF::prove(&secret_key, &msg); + let proof = VRF::prove(&secret_key, &msg).unwrap(); let res = VRF::verify(&public_key, &proof, &msg).unwrap(); assert!(res); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 9cfee0a7dee..f587ed3e43d 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -241,7 +241,7 @@ impl TestMiner { ); match self.vrf_key_map.get(vrf_pubkey) { Some(prover_key) => { - let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes()); + let proof = VRF::prove(prover_key, last_sortition_hash.as_bytes())?; let valid = match VRF::verify(vrf_pubkey, &proof, last_sortition_hash.as_bytes()) { Ok(v) => v, Err(e) => false, diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index c7bb456f44d..8bd2dcb5a23 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -646,7 +646,7 @@ fn make_genesis_block_with_recipients( let parent_stacks_header = StacksHeaderInfo::regtest_genesis(); - let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); + let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()).unwrap(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( burnchain, @@ -909,7 +909,7 @@ fn make_stacks_block_with_input( eprintln!("Build off of {:?}", &parent_stacks_header); - let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); + let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()).unwrap(); let total_burn = parents_sortition.total_burn; diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 44e1de41cb3..a56f07bb5d4 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1628,7 +1628,7 @@ fn test_nakamoto_block_static_verification() { let vrf_privkey = VRFPrivateKey::new(); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); let sortition_hash = SortitionHash([0x01; 32]); - let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()); + let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()).unwrap(); let burn_recipient = StacksAddress::burn_address(false).to_account_principal(); let alt_recipient = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key_2)) diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 4e857508809..d69dfe63b87 100644 --- 
a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -118,14 +118,38 @@ impl Keychain { /// Generate a VRF proof over a given byte message. /// `block_height` must be the _same_ block height called to make_vrf_keypair() - pub fn generate_proof(&self, block_height: u64, bytes: &[u8; 32]) -> VRFProof { + pub fn generate_proof(&self, block_height: u64, bytes: &[u8; 32]) -> Option<VRFProof> { let (pk, sk) = self.make_vrf_keypair(block_height); - let proof = VRF::prove(&sk, bytes.as_ref()); + let Some(proof) = VRF::prove(&sk, bytes.as_ref()) else { + error!( + "Failed to generate proof with keypair, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk + ); + return None; + }; // Ensure that the proof is valid by verifying - let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); - assert!(is_valid); - proof + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()) + .inspect_err(|e| { + error!( + "Failed to validate generated proof, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk, + "err" => %e, + ); + }) + .ok()?; + if !is_valid { + error!( + "Generated invalid proof, will be unable to mine."; + "block_height" => block_height, + "pk" => ?pk, + ); + None + } else { + Some(proof) + } } /// Generate a microblock signing key for this burnchain block height. @@ -367,7 +391,7 @@ mod tests { }; // Generate the proof - let proof = VRF::prove(vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref())?; // Ensure that the proof is valid by verifying let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 7155cf5966b..01b45d6ec13 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -1127,6 +1127,17 @@ impl BlockMinerThread { ) }; + let Some(vrf_proof) = vrf_proof else { + error!( + "Unable to generate VRF proof, will be unable to mine"; + "burn_block_sortition_hash" => %self.burn_election_block.sortition_hash, + "burn_block_block_height" => %self.burn_block.block_height, + "burn_block_hash" => %self.burn_block.burn_header_hash, + "vrf_pubkey" => &self.registered_key.vrf_public_key.to_hex() + ); + return None; + }; + debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 3b970dc4d45..6cba1f9c837 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1730,6 +1730,17 @@ impl BlockMinerThread { ) }; + let Some(vrf_proof) = vrf_proof else { + error!( + "Unable to generate VRF proof, will be unable to mine"; + "burn_block_sortition_hash" => %self.burn_block.sortition_hash, + "burn_block_block_height" => %self.burn_block.block_height, + "burn_block_hash" => %self.burn_block.burn_header_hash, + "vrf_pubkey" => &self.registered_key.vrf_public_key.to_hex() + ); + return None; + }; + debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 093f19c8ada..8abe84a7d51 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -659,10 +659,13 @@ impl Node { .expect("FATAL: failed to query canonical burn chain tip"); // Generates a proof out of the sortition hash provided in the params.
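// With the fallible keychain API, a keypair that yields a non-canonical VRF scalar now surfaces here as `None`, so the node warns and gives up on the tenure instead of panicking inside `generate_proof`.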
- let vrf_proof = self.keychain.generate_proof( + let Some(vrf_proof) = self.keychain.generate_proof( registered_key.target_block_height, tip.sortition_hash.as_bytes(), - ); + ) else { + warn!("Failed to generate VRF proof, will be unable to initiate new tenure"); + return None; + }; // Generates a new secret key for signing the trail of microblocks // of the upcoming tenure. @@ -731,10 +734,13 @@ impl Node { if self.active_registered_key.is_some() { let registered_key = self.active_registered_key.clone().unwrap(); - let vrf_proof = self.keychain.generate_proof( + let Some(vrf_proof) = self.keychain.generate_proof( registered_key.target_block_height, burnchain_tip.block_snapshot.sortition_hash.as_bytes(), - ); + ) else { + warn!("Failed to generate VRF proof, will be unable to mine commits"); + return; + }; let op = self.generate_block_commit_op( anchored_block_from_ongoing_tenure.header.block_hash(), From 89e7a9d7a9d37a3c6c4d3beca042cb9e9ae6da6a Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Mon, 24 Mar 2025 19:05:32 +0100 Subject: [PATCH 191/238] chore: fix spelling error in comments, #4613 --- .gitattributes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index e5f2cf06c8d..1114324eb0a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,5 +2,5 @@ legacy/* linguist-vendored # Enforcing 'lf' eol mainly for: # - 'stx-genesis' package, where txt files need hash computation and comparison # - 'clarity' package, where clariy language is sentitive to line endings for .clar files -# anyhow, setting eol for all text files to have an omegeneous management over the whole code base +# anyhow, setting eol for all text files to have a homogeneous management over the whole code base * text eol=lf From e3286046e65a95e8763e8d784dab40b016a6d60f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 24 Mar 2025 14:19:28 -0500 Subject: [PATCH 192/238] remove unnecessary slog imports --- stacks-signer/src/v0/signer_state.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs index f97c6737cd7..720a59dab5f 100644 --- a/stacks-signer/src/v0/signer_state.rs +++ b/stacks-signer/src/v0/signer_state.rs @@ -22,7 +22,6 @@ use libsigner::v0::messages::{ StateMachineUpdateMinerState, }; use serde::{Deserialize, Serialize}; -use slog::{slog_info, slog_warn}; use stacks_common::bitvec::BitVec; use stacks_common::codec::Error as CodecError; use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; From b6d9604f398c947add61685c61c832f3b25cfb14 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 24 Mar 2025 16:13:24 -0400 Subject: [PATCH 193/238] refactor: improvements to nonce cache from review --- stackslib/src/core/nonce_cache.rs | 57 ++++++++++++++----------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs index e15ff36151e..c30a2451928 100644 --- a/stackslib/src/core/nonce_cache.rs +++ b/stackslib/src/core/nonce_cache.rs @@ -1,4 +1,4 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation +// Copyright (C) 2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -67,38 +67,31 @@ impl NonceCache { C: ClarityConnection, { // Check in-memory cache - match self.cache.get(address) { - Some(nonce) => nonce, - None => { - // Check sqlite cache - 
let opt_nonce = match db_get_nonce(mempool_db, address) { - Ok(opt_nonce) => opt_nonce, - Err(e) => { - warn!("error retrieving nonce from mempool db: {}", e); - None - } - }; - match opt_nonce { - Some(nonce) => { - // Insert into in-memory cache, but it is not dirty, - // since we just got it from the database. - let evicted = self.cache.insert_clean(address.clone(), nonce); - if evicted.is_some() { - // If we evicted something, we need to flush the cache. - self.flush_with_evicted(mempool_db, evicted); - } - nonce - } - None => { - let nonce = - StacksChainState::get_nonce(clarity_tx, &address.clone().into()); - - self.set(address.clone(), nonce, mempool_db); - nonce - } - } + if let Some(cached_nonce) = self.cache.get(address) { + return cached_nonce; + }; + + // Check sqlite cache + let db_nonce_opt = db_get_nonce(mempool_db, address).unwrap_or_else(|e| { + warn!("error retrieving nonce from mempool db: {e}"); + None + }); + if let Some(db_nonce) = db_nonce_opt { + // Insert into in-memory cache, but it is not dirty, + // since we just got it from the database. + let evicted = self.cache.insert_clean(address.clone(), db_nonce); + if evicted.is_some() { + // If we evicted something, we need to flush the cache. + self.flush_with_evicted(mempool_db, evicted); } + return db_nonce; } - } + + // Check the chainstate + let nonce = StacksChainState::get_nonce(clarity_tx, &address.clone().into()); + + self.set(address.clone(), nonce, mempool_db); + nonce } /// Set the nonce for `address` to `value` in the in-memory cache. @@ -164,7 +157,7 @@ impl NonceCache { Ok(()) } - /// Flush the in-memory cache the the DB. + /// Flush the in-memory cache to the DB. /// Do not return until successful. pub fn flush(&mut self, conn: &mut DBConn) { self.flush_with_evicted(conn, None) From 1c7008ebcc5d07f3222057caaca666b567b5030a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Mar 2025 08:26:56 +0100 Subject: [PATCH 194/238] added max_execution_time to initialize_smart_contract --- clarity/src/vm/clarity.rs | 6 ++++ stackslib/src/chainstate/nakamoto/mod.rs | 1 + .../chainstate/stacks/boot/contract_tests.rs | 10 +++++- .../src/chainstate/stacks/db/transactions.rs | 1 + stackslib/src/clarity_vm/clarity.rs | 17 ++++++++- .../src/clarity_vm/tests/analysis_costs.rs | 3 ++ stackslib/src/clarity_vm/tests/contracts.rs | 35 +++++++++++++++++-- stackslib/src/clarity_vm/tests/costs.rs | 2 ++ .../src/clarity_vm/tests/large_contract.rs | 22 ++++++++---- .../src/util_lib/signed_structured_data.rs | 1 + 10 files changed, 88 insertions(+), 10 deletions(-) diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 1ef22cbc58b..cbe71d482e5 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -353,12 +353,18 @@ pub trait TransactionConnection: ClarityConnection { contract_str: &str, sponsor: Option<PrincipalData>, abort_call_back: F, + max_execution_time: Option<std::time::Duration>, ) -> Result<(AssetMap, Vec<StacksTransactionEvent>), Error> where F: FnOnce(&AssetMap, &mut ClarityDatabase) -> bool, { let (_, asset_map, events, aborted) = self.with_abort_callback( |vm_env| { + if let Some(max_execution_time_duration) = max_execution_time { + vm_env + .context + .set_max_execution_time(max_execution_time_duration); + } vm_env .initialize_contract_from_ast( identifier.clone(), diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index c5df44c6189..6eeebc54dbb 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -4896,6 +4896,7 @@ impl
NakamotoChainState { &contract_content, None, |_, _| false, + None, ) .unwrap(); clarity.save_analysis(&contract_id, &analysis).unwrap(); diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index b39635dd92f..4ac645e6bd7 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -1780,7 +1780,15 @@ fn test_deploy_smart_contract( block.as_transaction(|tx| { let (ast, analysis) = tx.analyze_smart_contract(contract_id, version, content, ASTRules::PrecheckSize)?; - tx.initialize_smart_contract(contract_id, version, &ast, content, None, |_, _| false)?; + tx.initialize_smart_contract( + contract_id, + version, + &ast, + content, + None, + |_, _| false, + None, + )?; tx.save_analysis(contract_id, &analysis)?; return Ok(()); }) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 22e3797e454..20eef9f7719 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1282,6 +1282,7 @@ impl StacksChainState { ) .expect("FATAL: error while evaluating post-conditions") }, + max_execution_time, ); let mut total_cost = clarity_tx.cost_so_far(); diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 02832512bdb..f73593f4dd2 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -387,6 +387,7 @@ impl ClarityInstance { BOOT_CODE_COSTS, None, |_, _| false, + None, ) .unwrap(); }); @@ -408,6 +409,7 @@ impl ClarityInstance { &*BOOT_CODE_COST_VOTING, None, |_, _| false, + None, ) .unwrap(); @@ -433,6 +435,7 @@ impl ClarityInstance { &*BOOT_CODE_POX_TESTNET, None, |_, _| false, + None, ) .unwrap(); }); @@ -484,6 +487,7 @@ impl ClarityInstance { BOOT_CODE_COSTS_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -505,6 +509,7 @@ impl ClarityInstance { BOOT_CODE_COSTS_3, None, |_, _| false, + None, ) .unwrap(); }); @@ -526,6 +531,7 @@ impl ClarityInstance { &*POX_2_TESTNET_CODE, None, |_, _| false, + None, ) .unwrap(); }); @@ -2051,6 +2057,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2104,6 +2111,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(&contract_identifier, &ct_analysis) @@ -2132,6 +2140,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(&contract_identifier, &ct_analysis) @@ -2163,7 +2172,8 @@ mod tests { &ct_ast, contract, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) @@ -2216,6 +2226,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2277,6 +2288,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2369,6 +2381,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2500,6 +2513,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) @@ -2891,6 +2905,7 @@ mod tests { contract, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&contract_identifier, &ct_analysis) diff --git a/stackslib/src/clarity_vm/tests/analysis_costs.rs b/stackslib/src/clarity_vm/tests/analysis_costs.rs index 
b60d20e34a6..0114cc34184 100644 --- a/stackslib/src/clarity_vm/tests/analysis_costs.rs +++ b/stackslib/src/clarity_vm/tests/analysis_costs.rs @@ -132,6 +132,7 @@ fn setup_tracked_cost_test( contract_trait, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&trait_contract_id, &ct_analysis) @@ -165,6 +166,7 @@ fn setup_tracked_cost_test( contract_other, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&other_contract_id, &ct_analysis) @@ -240,6 +242,7 @@ fn test_tracked_costs( &contract_self, None, |_, _| false, + None, ) .unwrap(); conn.save_analysis(&self_contract_id, &ct_analysis).unwrap(); diff --git a/stackslib/src/clarity_vm/tests/contracts.rs b/stackslib/src/clarity_vm/tests/contracts.rs index bf54fb14b0d..16b83146c78 100644 --- a/stackslib/src/clarity_vm/tests/contracts.rs +++ b/stackslib/src/clarity_vm/tests/contracts.rs @@ -132,6 +132,7 @@ fn test_get_burn_block_info_eval() { contract, None, |_, _| false, + None, ) .unwrap(); }); @@ -247,7 +248,7 @@ fn test_get_block_info_eval_v210() { .analyze_smart_contract(&contract_identifier, clarity_version, contract, ASTRules::PrecheckSize) .unwrap(); clarity_db - .initialize_smart_contract(&contract_identifier, clarity_version, &ast, contract, None, |_, _| false) + .initialize_smart_contract(&contract_identifier, clarity_version, &ast, contract, None, |_, _| false, None) .unwrap(); }); let mut tx = conn.start_transaction_processing(); @@ -326,7 +327,15 @@ fn publish_contract( bc.as_transaction(|tx| { let (ast, analysis) = tx.analyze_smart_contract(contract_id, version, contract, ASTRules::PrecheckSize)?; - tx.initialize_smart_contract(contract_id, version, &ast, contract, None, |_, _| false)?; + tx.initialize_smart_contract( + contract_id, + version, + &ast, + contract, + None, + |_, _| false, + None, + )?; tx.save_analysis(contract_id, &analysis)?; Ok(()) }) @@ -616,6 +625,7 @@ fn trait_with_trait_invocation_cross_epoch() { math_trait, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -640,6 +650,7 @@ fn trait_with_trait_invocation_cross_epoch() { compute_trait, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -664,6 +675,7 @@ fn trait_with_trait_invocation_cross_epoch() { impl_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -688,6 +700,7 @@ fn trait_with_trait_invocation_cross_epoch() { impl_math, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -712,6 +725,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -743,6 +757,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -767,6 +782,7 @@ fn trait_with_trait_invocation_cross_epoch() { use_compute, None, |_, _| false, + None, ) .unwrap(); clarity_db @@ -955,6 +971,7 @@ fn test_block_heights() { contract_clarity1, None, |_, _| false, + None ).unwrap(); // analyze the contracts as Clarity 2 @@ -1014,6 +1031,7 @@ fn test_block_heights() { contract_clarity3, None, |_, _| false, + None ).unwrap(); }); @@ -1233,6 +1251,7 @@ fn test_block_heights_across_versions() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1263,6 +1282,7 @@ fn test_block_heights_across_versions() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1295,6 +1315,7 @@ fn test_block_heights_across_versions() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1362,6 +1383,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); 
}); @@ -1389,6 +1411,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1421,6 +1444,7 @@ fn test_block_heights_across_versions_traits_3_from_2() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1507,6 +1531,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1534,6 +1559,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { contract_e2c1_2, None, |_, _| false, + None, ) .unwrap(); }); @@ -1566,6 +1592,7 @@ fn test_block_heights_across_versions_traits_2_from_3() { &contract_e3c3, None, |_, _| false, + None, ) .unwrap(); }); @@ -1642,6 +1669,7 @@ fn test_block_heights_at_block() { contract, None, |_, _| false, + None ).unwrap(); }); @@ -1704,6 +1732,7 @@ fn test_get_block_info_time() { contract2, None, |_, _| false, + None, ) .unwrap(); @@ -1726,6 +1755,7 @@ fn test_get_block_info_time() { contract3, None, |_, _| false, + None, ) .unwrap(); @@ -1748,6 +1778,7 @@ fn test_get_block_info_time() { contract3_3, None, |_, _| false, + None, ) .unwrap(); }); diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index f8f32bc6fb9..fe30fb6c116 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -1184,6 +1184,7 @@ fn test_cost_contract_short_circuits(use_mainnet: bool, clarity_version: Clarity contract_src, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(contract_name, &analysis).unwrap(); @@ -1468,6 +1469,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi contract_src, None, |_, _| false, + None, ) .unwrap(); tx.save_analysis(contract_name, &analysis).unwrap(); diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 9fecb3bbc36..404b6faef82 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -160,6 +160,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac BOOT_CODE_COSTS_2, None, |_, _| false, + None, ) .unwrap(); } @@ -185,6 +186,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac BOOT_CODE_COSTS_3, None, |_, _| false, + None, ) .unwrap(); } @@ -222,6 +224,7 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac tokens_contract, None, |_, _| false, + None, ) .unwrap() }); @@ -757,7 +760,8 @@ pub fn rollback_log_memory_test( &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -833,7 +837,8 @@ pub fn let_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_id &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -912,7 +917,8 @@ pub fn argument_memory_test( &ct_ast, &contract, None, - |_, _| { false } + |_, _| { false }, + None ) .unwrap_err() ) @@ -1007,7 +1013,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_ast, &contract_ok, None, - |_, _| true + |_, _| true, + None ) .unwrap_err() { @@ -1033,7 +1040,8 @@ pub fn fcall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_ast, &contract_err, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) @@ -1120,6 +1128,7 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &contract, None, |_, _| false, + None, ) .unwrap(); 
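// The trailing `None` threaded through these call sites is the new `max_execution_time` argument; the tests leave it unset so contract initialization is never cut short by the execution-time limit.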
conn.save_analysis(&contract_identifier, &ct_analysis) @@ -1143,7 +1152,8 @@ pub fn ccall_memory_test(#[case] clarity_version: ClarityVersion, #[case] epoch_ &ct_ast, &contract, None, - |_, _| false + |_, _| false, + None ) .unwrap_err() ) diff --git a/stackslib/src/util_lib/signed_structured_data.rs b/stackslib/src/util_lib/signed_structured_data.rs index ead99de5f21..7aadc403d6f 100644 --- a/stackslib/src/util_lib/signed_structured_data.rs +++ b/stackslib/src/util_lib/signed_structured_data.rs @@ -253,6 +253,7 @@ pub mod pox4 { body, None, |_, _| false, + None, ) .unwrap(); clarity_db From 3e0d03fd81e260000f29cab5a25313bd3dd48edf Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Mar 2025 11:32:14 +0100 Subject: [PATCH 195/238] added integration test for contract publishing --- testnet/stacks-node/src/tests/signer/v0.rs | 111 ++++++++++++++++++++- 1 file changed, 109 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3f92aeb5df3..7bac669d901 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -774,7 +774,7 @@ impl MultipleMinerTest { let contract_tx = make_contract_publish( &self.sender_sk, self.sender_nonce, - self.send_fee, + self.send_fee + contract_name.len() as u64 + contract_src.len() as u64, self.signer_test.running_nodes.conf.burnchain.chain_id, contract_name, contract_src, @@ -12589,7 +12589,6 @@ fn miner_rejection_by_contract_call_execution_time_expired() { miners.wait_for_test_observer_blocks(60); - miners.send_fee = 300; // First, lets deploy the contract let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))"; @@ -12664,3 +12663,111 @@ fn miner_rejection_by_contract_call_execution_time_expired() { info!("------------------------- Shutdown -------------------------"); miners.shutdown(); } + +/// Test a scenario where: +/// Two miners boot to Nakamoto (first miner has max_execution_time set to 0). +/// Sortition occurs. Miner 1 wins. +/// Miner 1 fails to mine block N with contract-publish +/// Sortition occurs. Miner 2 wins. 
+/// Miner 2 successfully mines block N including the contract-publish previously rejected by miner 1 +/// Ensures both the miners are aligned +#[test] +#[ignore] +fn miner_rejection_by_contract_publish_execution_time_expired() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let num_signers = 5; + let num_txs = 3; + + let mut miners = MultipleMinerTest::new_with_config_modifications( + num_signers, + num_txs, + |signer_config| { + // Lets make sure we never time out since we need to stall some things to force our scenario + signer_config.block_proposal_validation_timeout = Duration::from_secs(1800); + signer_config.tenure_last_block_proposal_timeout = Duration::from_secs(1800); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(1800); + }, + |config| config.miner.max_execution_time_secs = Some(0), + |config| config.miner.max_execution_time_secs = None, + ); + let rl1_skip_commit_op = miners + .signer_test + .running_nodes + .counters + .naka_skip_commit_op + .clone(); + let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone(); + + let (conf_1, _) = miners.get_node_configs(); + let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); + let (_miner_pk_1, _) = miners.get_miner_public_keys(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. + rl2_skip_commit_op.set(true); + + miners.boot_to_epoch_3(); + + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + info!("------------------------- Pause Miner 1's Block Commits -------------------------"); + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Mines a Nakamoto Block N -------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block followed by Block N"); + + miners.wait_for_test_observer_blocks(60); + + // First, lets deploy the contract + let dummy_contract_src = + "(define-public (dummy (number uint)) (begin (ok (+ number u1))))(+ 1 1)"; + + let tx1 = miners.send_transfer_tx(); + + let contract_publish_txid = miners + .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .expect("Failed to publish contract in a new block"); + + miners.wait_for_test_observer_blocks(60); + + assert_eq!(last_block_contains_txid(&tx1), true); + + assert_eq!(last_block_contains_txid(&contract_publish_txid), false); + + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!("------------------------- Miner 2 Submits a Block Commit -------------------------"); + miners.submit_commit_miner_2(&sortdb); + + info!("------------------------- Mine Tenure -------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block followed by Block N+3"); + + info!("------------------------- Miner 2 Mines Block N+1 -------------------------"); + + miners.sender_nonce -= 1; + + let contract_publish_txid = miners + .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .expect("Failed to publish contract in a new block"); + + miners.wait_for_test_observer_blocks(60); + + assert_eq!(last_block_contains_txid(&contract_publish_txid), true); + + verify_sortition_winner(&sortdb, &miner_pkh_2); + + // ensure both miners are aligned + miners.wait_for_chains(60); + + info!("------------------------- 
Shutdown -------------------------"); + miners.shutdown(); +} From 21ec8dbc717b3a25f47774d5141459f14bab4f72 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Mar 2025 12:29:51 +0100 Subject: [PATCH 196/238] refactored test api --- testnet/stacks-node/src/tests/signer/v0.rs | 33 +++++++++++----------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7bac669d901..e5768e99ee7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -792,11 +792,22 @@ impl MultipleMinerTest { timeout_secs: u64, ) -> Result { let stacks_height_before = self.get_peer_stacks_tip_height(); + let txid = self.send_contract_publish(contract_name, contract_src); + + // wait for the new block to be mined wait_for(timeout_secs, || { Ok(self.get_peer_stacks_tip_height() > stacks_height_before) })?; - Ok(txid) + + // wait for the observer to see it + self.wait_for_test_observer_blocks(timeout_secs); + + if last_block_contains_txid(&txid) { + Ok(txid) + } else { + Err(txid) + } } pub fn send_contract_call( @@ -12592,14 +12603,10 @@ fn miner_rejection_by_contract_call_execution_time_expired() { // First, lets deploy the contract let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))"; - let contract_publish_txid = miners + let _ = miners .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) .expect("Failed to publish contract in a new block"); - miners.wait_for_test_observer_blocks(60); - - assert_eq!(last_block_contains_txid(&contract_publish_txid), true); - info!("------------------------- Miner 1 Mines a Nakamoto Block N+1 -------------------------"); let stacks_height_before = miners.get_peer_stacks_tip_height(); @@ -12731,16 +12738,12 @@ fn miner_rejection_by_contract_publish_execution_time_expired() { let tx1 = miners.send_transfer_tx(); - let contract_publish_txid = miners + let _ = miners .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) - .expect("Failed to publish contract in a new block"); - - miners.wait_for_test_observer_blocks(60); + .expect_err("Expected an error while publishing contract in a new block"); assert_eq!(last_block_contains_txid(&tx1), true); - assert_eq!(last_block_contains_txid(&contract_publish_txid), false); - verify_sortition_winner(&sortdb, &miner_pkh_1); info!("------------------------- Miner 2 Submits a Block Commit -------------------------"); @@ -12755,14 +12758,10 @@ fn miner_rejection_by_contract_publish_execution_time_expired() { miners.sender_nonce -= 1; - let contract_publish_txid = miners + let _ = miners .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) .expect("Failed to publish contract in a new block"); - miners.wait_for_test_observer_blocks(60); - - assert_eq!(last_block_contains_txid(&contract_publish_txid), true); - verify_sortition_winner(&sortdb, &miner_pkh_2); // ensure both miners are aligned From 2e683a3161e50eab149ac634efae522e44969a98 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 08:14:49 -0400 Subject: [PATCH 197/238] test: add proptests for `LruCache` --- Cargo.lock | 80 +++++++++++++++++++++++++++- stacks-common/Cargo.toml | 29 +++++++--- stacks-common/src/util/lru_cache.rs | 82 +++++++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a51010ecdf3..60032fa3090 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -461,6 
+461,21 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -1502,7 +1517,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2270,6 +2285,26 @@ dependencies = [ "thiserror", ] +[[package]] +name = "proptest" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.4.2", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -2285,6 +2320,12 @@ dependencies = [ "cc", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.35" @@ -2365,6 +2406,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -2665,6 +2715,18 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.16" @@ -3049,6 +3111,7 @@ dependencies = [ "lazy_static", "libc", "nix", + "proptest", "rand 0.8.5", "rand_core 0.6.4", "ripemd", @@ -3672,6 +3735,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.7.0" @@ -3766,6 +3835,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "waker-fn" version = "1.1.1" diff --git a/stacks-common/Cargo.toml 
b/stacks-common/Cargo.toml index 4b965d753d2..b388cb1a9e6 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -1,14 +1,25 @@ [package] name = "stacks-common" version = "0.0.1" -authors = [ "Jude Nelson ", - "Aaron Blankstein ", - "Ludo Galabru " ] +authors = [ + "Jude Nelson ", + "Aaron Blankstein ", + "Ludo Galabru ", +] license = "GPLv3" homepage = "https://github.com/blockstack/stacks-blockchain" repository = "https://github.com/blockstack/stacks-blockchain" description = "Common modules for blockstack_lib, libclarity" -keywords = [ "stacks", "stx", "bitcoin", "crypto", "blockstack", "decentralized", "dapps", "blockchain" ] +keywords = [ + "stacks", + "stx", + "bitcoin", + "crypto", + "blockstack", + "decentralized", + "dapps", + "blockchain", +] readme = "README.md" resolver = "2" edition = "2021" @@ -25,7 +36,7 @@ serde_derive = "1" sha3 = "0.10.1" ripemd = "0.1.1" lazy_static = "1.4.0" -slog = { version = "2.5.2", features = [ "max_level_trace" ] } +slog = { version = "2.5.2", features = ["max_level_trace"] } slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" @@ -37,7 +48,12 @@ rusqlite = { workspace = true, optional = true } nix = "0.23" [target.'cfg(windows)'.dependencies] -winapi = { version = "0.3", features = ["consoleapi", "handleapi", "synchapi", "winbase"] } +winapi = { version = "0.3", features = [ + "consoleapi", + "handleapi", + "synchapi", + "winbase", +] } [target.'cfg(windows)'.dev-dependencies] winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } @@ -63,6 +79,7 @@ features = ["std"] [dev-dependencies] rand_core = { workspace = true } +proptest = "1.6.0" [features] default = ["developer-mode"] diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index c108a4deb16..7c49cbf7e2e 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -305,3 +305,85 @@ mod tests { assert_eq!(flushed, [(3, 3), (2, 2)]); } } + +#[cfg(test)] +mod property_tests { + use proptest::prelude::*; + + use super::*; + + #[derive(Debug, Clone)] + enum CacheOp { + Insert(u32), + Get(u32), + InsertClean(u32), + Flush, + } + + prop_compose! { + fn arbitrary_op()(op_type in 0..4, value in 0..100u32) -> CacheOp { + match op_type { + 0 => CacheOp::Insert(value), + 1 => CacheOp::Get(value), + 2 => CacheOp::InsertClean(value), + _ => CacheOp::Flush, + } + } + } + + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(1_000_000))] + + #[test] + fn doesnt_crash_with_random_operations(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(10); + for op in ops { + match op { + CacheOp::Insert(v) => { cache.insert(v, v); } + CacheOp::Get(v) => { cache.get(&v); } + CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } + } + } + } + + #[test] + fn maintains_size_invariant(ops in prop::collection::vec(0..100u32, 1..1000)) { + let capacity = 10; + let mut cache = LruCache::new(capacity); + for op in ops { + cache.insert(op, op); + prop_assert!(cache.cache.len() <= capacity); + prop_assert!(cache.order.len() <= capacity); + } + } + + #[test] + fn maintains_lru_order(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(10); + for op in ops { + match op { + CacheOp::Insert(v) => { cache.insert(v, v); } + CacheOp::Get(v) => { cache.get(&v); } + CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } + } + // Verify linked list integrity + if !cache.order.is_empty() { + let mut curr = cache.head; + let mut count = 0; + while curr != cache.capacity { + if count >= cache.order.len() { + prop_assert!(false, "Linked list cycle detected"); + } + if cache.order[curr].next != cache.capacity { + prop_assert_eq!(cache.order[cache.order[curr].next].prev, curr); + } + curr = cache.order[curr].next; + count += 1; + } + } + } + } + } +} From 980ccb13164c2682159a3f213800d2c5d08a235d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Mar 2025 14:51:25 +0100 Subject: [PATCH 198/238] fixed clippy check --- clarity/src/vm/clarity.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index cbe71d482e5..8c5ac144c43 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -345,6 +345,7 @@ pub trait TransactionConnection: ClarityConnection { /// abort_call_back is called with an AssetMap and a ClarityDatabase reference, /// if abort_call_back returns true, all modifications from this transaction will be rolled back. /// otherwise, they will be committed (though they may later be rolled back if the block itself is rolled back). 
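/// `max_execution_time`, when set, bounds how long contract initialization may run before it is aborted with an execution-time error.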
+ #[allow(clippy::too_many_arguments)] fn initialize_smart_contract( &mut self, identifier: &QualifiedContractIdentifier, From 54d476f1dfbca7cd6709ceb7730142b873f8fc6e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 24 Mar 2025 20:24:50 -0400 Subject: [PATCH 199/238] feat: make `LruCache` methods fallible --- stacks-common/src/util/lru_cache.rs | 164 ++++++++++++++++------------ 1 file changed, 93 insertions(+), 71 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 7c49cbf7e2e..131dcb61f47 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -1,4 +1,4 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation +// Copyright (C) 2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -62,8 +62,12 @@ impl Display for LruCache { )?; let mut curr = self.head; while curr != self.capacity { - writeln!(f, " {}", self.order[curr])?; - curr = self.order[curr].next; + let Some(node) = self.order.get(curr) else { + writeln!(f, " ")?; + break; + }; + writeln!(f, " {}", node)?; + curr = node.next; } Ok(()) } @@ -82,78 +86,90 @@ impl LruCache { } /// Get the value for the given key - pub fn get(&mut self, key: &K) -> Option<V> { - if let Some(node) = self.cache.get(key) { + /// Returns an error iff the cache is corrupted and should be discarded + pub fn get(&mut self, key: &K) -> Result<Option<V>, ()> { + if let Some(order_idx) = self.cache.get(key) { // Move the node to the head of the LRU list - let node = *node; - - if node != self.head { - let prev = self.order[node].prev; - let next = self.order[node].next; - - if node == self.tail { + if *order_idx != self.head { + let node = self.order.get_mut(*order_idx).ok_or(())?; + let prev = node.prev; + let next = node.next; + node.prev = self.capacity; + node.next = self.head; + + if *order_idx == self.tail { // If this is the tail, update the tail self.tail = prev; } else { // Else, update the next node's prev pointer - self.order[next].prev = prev; + let next_node = self.order.get_mut(next).ok_or(())?; + next_node.prev = prev; } - self.order[prev].next = next; - self.order[node].prev = self.capacity; - self.order[node].next = self.head; - self.order[self.head].prev = node; - self.head = node; + let prev_node = self.order.get_mut(prev).ok_or(())?; + prev_node.next = next; + + let head_node = self.order.get_mut(self.head).ok_or(())?; + head_node.prev = *order_idx; + self.head = *order_idx; } - Some(self.order[node].value) + let node = self.order.get(*order_idx).ok_or(())?; + Ok(Some(node.value)) } else { - None + Ok(None) } } /// Insert a key-value pair into the cache, marking it as dirty. - /// Returns `Some((K, V))` if a dirty value was evicted. - pub fn insert(&mut self, key: K, value: V) -> Option<(K, V)> { + /// Returns an error iff the cache is corrupted and should be discarded + /// Returns `Ok(Some((K, V)))` if a dirty value was evicted. + pub fn insert(&mut self, key: K, value: V) -> Result<Option<(K, V)>, ()> { self.insert_with_dirty(key, value, true) } /// Insert a key-value pair into the cache, marking it as clean. - /// Returns `Some((K, V))` if a dirty value was evicted. - pub fn insert_clean(&mut self, key: K, value: V) -> Option<(K, V)> { + /// Returns an error iff the cache is corrupted and should be discarded + /// Returns `Ok(Some((K, V)))` if a dirty value was evicted.
+    pub fn insert_clean(&mut self, key: K, value: V) -> Result<Option<(K, V)>, ()> {
         self.insert_with_dirty(key, value, false)
     }
 
     /// Insert a key-value pair into the cache
-    /// Returns `Some((K, V))` if a dirty value was evicted.
-    pub fn insert_with_dirty(&mut self, key: K, value: V, dirty: bool) -> Option<(K, V)> {
+    /// Returns an error iff the cache is corrupted and should be discarded
+    /// Returns `Ok(Some((K, V)))` if a dirty value was evicted.
+    pub fn insert_with_dirty(
+        &mut self,
+        key: K,
+        value: V,
+        dirty: bool,
+    ) -> Result<Option<(K, V)>, ()> {
         let mut evicted = None;
-        if let Some(node) = self.cache.get(&key) {
+        if let Some(order_idx) = self.cache.get(&key) {
             // Update the value for the key
-            let node = *node;
-            self.order[node].value = value;
-            self.order[node].dirty = dirty;
+            let node = self.order.get_mut(*order_idx).ok_or(())?;
+            node.value = value;
+            node.dirty = dirty;
 
             // Just call get to handle updating the LRU list
-            self.get(&key);
+            self.get(&key)?;
         } else {
             let index = if self.cache.len() == self.capacity {
                 // Take the place of the least recently used element.
                 // First, remove it from the tail of the LRU list
                 let index = self.tail;
-                let prev = self.order[index].prev;
-                self.order[prev].next = self.capacity;
-                self.tail = prev;
+                let tail_node = self.order.get_mut(index).ok_or(())?;
+                let prev = tail_node.prev;
 
                 // Remove it from the cache
-                self.cache.remove(&self.order[index].key);
+                self.cache.remove(&tail_node.key);
 
                 // Replace the key with the new key, saving the old key
-                let replaced_key = std::mem::replace(&mut self.order[index].key, key.clone());
+                let replaced_key = std::mem::replace(&mut tail_node.key, key.clone());
 
                 // If it is dirty, save the key-value pair to return
-                if self.order[index].dirty {
-                    evicted = Some((replaced_key, self.order[index].value));
+                if tail_node.dirty {
+                    evicted = Some((replaced_key, tail_node.value));
                 }
 
                 // Insert this new value into the cache
@@ -161,10 +177,14 @@ impl<K: Eq + Hash + Clone, V: Copy> LruCache<K, V> {
 
                 // Update the node with the new key-value pair, inserting it at
                 // the head of the LRU list
-                self.order[index].value = value;
-                self.order[index].dirty = dirty;
-                self.order[index].next = self.head;
-                self.order[index].prev = self.capacity;
+                tail_node.value = value;
+                tail_node.dirty = dirty;
+                tail_node.next = self.head;
+                tail_node.prev = self.capacity;
+
+                let tail_prev_node = self.order.get_mut(prev).ok_or(())?;
+                tail_prev_node.next = self.capacity;
+                self.tail = prev;
 
                 index
             } else {
@@ -193,9 +213,11 @@ impl<K: Eq + Hash + Clone, V: Copy> LruCache<K, V> {
             self.head = index;
         }
 
-        evicted
+        Ok(evicted)
     }
 
+    /// Flush all dirty values in the cache, calling the given function, `f`,
+    /// for each dirty value.
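+    /// Entries whose callback succeeds are marked clean; an error from the
+    /// callback stops the walk early and leaves the remaining entries dirty.
+    ///
+    /// A sketch of intended use, assuming some fallible store with a `save`
+    /// method (`store` and `save` are illustrative only):
+    ///
+    ///     cache.flush(|k, v| store.save(k, v))?;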
     pub fn flush<E>(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> {
         let mut index = self.head;
         while index != self.capacity {
@@ -219,39 +241,39 @@ mod tests {
     fn test_lru_cache() {
         let mut cache = LruCache::new(2);
 
-        cache.insert(1, 1);
-        cache.insert(2, 2);
-        assert_eq!(cache.get(&1), Some(1));
-        cache.insert(3, 3);
-        assert_eq!(cache.get(&2), None);
-        cache.insert(4, 4);
-        assert_eq!(cache.get(&1), None);
-        assert_eq!(cache.get(&3), Some(3));
-        assert_eq!(cache.get(&4), Some(4));
+        cache.insert(1, 1).unwrap();
+        cache.insert(2, 2).unwrap();
+        assert_eq!(cache.get(&1).unwrap(), Some(1));
+        cache.insert(3, 3).unwrap();
+        assert_eq!(cache.get(&2).unwrap(), None);
+        cache.insert(4, 4).unwrap();
+        assert_eq!(cache.get(&1).unwrap(), None);
+        assert_eq!(cache.get(&3).unwrap(), Some(3));
+        assert_eq!(cache.get(&4).unwrap(), Some(4));
     }
 
     #[test]
     fn test_lru_cache_update() {
         let mut cache = LruCache::new(2);
 
-        cache.insert(1, 1);
-        cache.insert(2, 2);
-        cache.insert(1, 10);
-        assert_eq!(cache.get(&1), Some(10));
-        cache.insert(3, 3);
-        assert_eq!(cache.get(&2), None);
-        cache.insert(2, 4);
-        assert_eq!(cache.get(&2), Some(4));
-        assert_eq!(cache.get(&3), Some(3));
+        cache.insert(1, 1).unwrap();
+        cache.insert(2, 2).unwrap();
+        cache.insert(1, 10).unwrap();
+        assert_eq!(cache.get(&1).unwrap(), Some(10));
+        cache.insert(3, 3).unwrap();
+        assert_eq!(cache.get(&2).unwrap(), None);
+        cache.insert(2, 4).unwrap();
+        assert_eq!(cache.get(&2).unwrap(), Some(4));
+        assert_eq!(cache.get(&3).unwrap(), Some(3));
     }
 
     #[test]
     fn test_lru_cache_evicted() {
         let mut cache = LruCache::new(2);
 
-        assert!(cache.insert(1, 1).is_none());
-        assert!(cache.insert(2, 2).is_none());
-        let evicted = cache.insert(3, 3).expect("expected an eviction");
+        assert!(cache.insert(1, 1).unwrap().is_none());
+        assert!(cache.insert(2, 2).unwrap().is_none());
+        let evicted = cache.insert(3, 3).unwrap().expect("expected an eviction");
         assert_eq!(evicted, (1, 1));
     }
 
@@ -259,7 +281,7 @@
     fn test_lru_cache_flush() {
         let mut cache = LruCache::new(2);
 
-        cache.insert(1, 1);
+        cache.insert(1, 1).unwrap();
 
         let mut flushed = Vec::new();
         cache
@@ -271,8 +293,8 @@
 
         assert_eq!(flushed, vec![(1, 1)]);
 
-        cache.insert(1, 3);
-        cache.insert(2, 2);
+        cache.insert(1, 3).unwrap();
+        cache.insert(2, 2).unwrap();
 
         let mut flushed = Vec::new();
         cache
@@ -289,10 +311,10 @@
     fn test_lru_cache_evict_clean() {
         let mut cache = LruCache::new(2);
 
-        assert!(cache.insert_with_dirty(0, 0, false).is_none());
-        assert!(cache.insert_with_dirty(1, 1, false).is_none());
-        assert!(cache.insert_with_dirty(2, 2, true).is_none());
-        assert!(cache.insert_with_dirty(3, 3, true).is_none());
+        assert!(cache.insert_with_dirty(0, 0, false).unwrap().is_none());
+        assert!(cache.insert_with_dirty(1, 1, false).unwrap().is_none());
+        assert!(cache.insert_with_dirty(2, 2, true).unwrap().is_none());
+        assert!(cache.insert_with_dirty(3, 3, true).unwrap().is_none());
 
         let mut flushed = Vec::new();
         cache

From fdb53481a1c63c6494601e30c40756cb88127934 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Tue, 25 Mar 2025 10:21:51 -0400
Subject: [PATCH 200/238] feat: handle fallible `LruCache` in `NonceCache`

---
 stacks-common/src/util/lru_cache.rs | 48 +++++++++++++++++++------
 stackslib/src/config/mod.rs         |  8 +++++
 stackslib/src/core/nonce_cache.rs   | 52 +++++++++++++++++++++++++----
 3 files changed, 89 insertions(+), 19 deletions(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 131dcb61f47..b031bcdfb0f 100644
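The reworked `flush` in the hunks below separates two failure layers: the
outer `Result` reports structural corruption of the cache itself, while the
inner `Result` carries any error returned by the caller's closure. A hedged
caller-side sketch (`persist` and `rebuild_cache` are placeholders, not
functions from this repository):

    match cache.flush(|k, v| persist(k, v)) {
        Ok(Ok(())) => {}             // every dirty entry persisted
        Ok(Err(e)) => return Err(e), // closure failed; cache still usable
        Err(()) => rebuild_cache(),  // corruption detected (e.g. a cycle)
    }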
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -15,7 +15,7 @@
 use std::fmt::Display;
 
-use hashbrown::HashMap;
+use hashbrown::{HashMap, HashSet};
 
 /// Node in the doubly linked list
 struct Node<K, V> {
@@ -75,7 +75,12 @@ impl<K: Display, V: Display> Display for LruCache<K, V> {
 impl<K: Eq + Hash + Clone, V: Copy> LruCache<K, V> {
     /// Create a new LRU cache with the given capacity
-    pub fn new(capacity: usize) -> Self {
+    pub fn new(mut capacity: usize) -> Self {
+        if capacity == 0 {
+            error!("Capacity must be greater than 0. Defaulting to 1024.");
+            capacity = 1024;
+        }
+
         LruCache {
             capacity,
             cache: HashMap::new(),
@@ -218,18 +223,37 @@ impl<K: Eq + Hash + Clone, V: Copy> LruCache<K, V> {
 
     /// Flush all dirty values in the cache, calling the given function, `f`,
     /// for each dirty value.
-    pub fn flush<E>(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> {
-        let mut index = self.head;
-        while index != self.capacity {
-            let next = self.order[index].next;
-            if self.order[index].dirty {
-                let value = self.order[index].value;
-                f(&self.order[index].key, value)?;
-                self.order[index].dirty = false;
+    /// Outer result is an error iff the cache is corrupted and should be discarded.
+    /// Inner result is an error iff the function, `f`, returns an error.
+    pub fn flush<E>(
+        &mut self,
+        mut f: impl FnMut(&K, V) -> Result<(), E>,
+    ) -> Result<Result<(), E>, ()> {
+        let mut current = self.head;
+
+        // Keep track of visited nodes to detect cycles
+        let mut visited = HashSet::new();
+
+        while current != self.capacity {
+            // Detect cycles
+            if !visited.insert(current) {
+                return Err(());
             }
-            index = next;
+
+            let node = self.order.get_mut(current).ok_or(())?;
+            let next = node.next;
+            if node.dirty {
+                let value = node.value;
+
+                // Call the flush function
+                match f(&node.key, value) {
+                    Ok(()) => node.dirty = false,
+                    Err(e) => return Ok(Err(e)),
+                }
+            }
+            current = next;
         }
-        Ok(())
+        Ok(Ok(()))
     }
 }
 
diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs
index 5476c677d5f..8629ebed591 100644
--- a/stackslib/src/config/mod.rs
+++ b/stackslib/src/config/mod.rs
@@ -2664,6 +2664,14 @@ impl MinerConfigFile {
         } else {
             miner_default_config.tenure_cost_limit_per_block_percentage
         };
+
+        let nonce_cache_size = self
+            .nonce_cache_size
+            .unwrap_or(miner_default_config.nonce_cache_size);
+        if nonce_cache_size == 0 {
+            return Err("miner.nonce_cache_size must be greater than 0".to_string());
+        }
+
         Ok(MinerConfig {
             first_attempt_time_ms: self
                 .first_attempt_time_ms
diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs
index c30a2451928..77e4aace970 100644
--- a/stackslib/src/core/nonce_cache.rs
+++ b/stackslib/src/core/nonce_cache.rs
@@ -39,12 +39,23 @@ use crate::util_lib::db::{query_row, u64_to_sql, DBConn, Error as db_error};
 pub struct NonceCache {
     /// In-memory LRU cache of nonces.
     cache: LruCache<StacksAddress, u64>,
+    max_size: usize,
 }
 
 impl NonceCache {
     pub fn new(max_size: usize) -> Self {
         Self {
             cache: LruCache::new(max_size),
+            max_size,
         }
     }
+
+    /// Reset the cache to an empty state and clear the nonce DB.
+    /// This should only be called when the cache is corrupted.
+ fn reset_cache(&mut self, conn: &mut DBConn) { + self.cache = LruCache::new(self.max_size); + if let Err(e) = conn.execute("DELETE FROM nonces", []) { + warn!("error clearing nonces table: {e}"); } } @@ -67,9 +78,14 @@ impl NonceCache { C: ClarityConnection, { // Check in-memory cache - if let Some(cached_nonce) = self.cache.get(address) { - return cached_nonce; - }; + match self.cache.get(address) { + Ok(Some(nonce)) => return nonce, + Ok(None) => {} + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(mempool_db); + } + } // Check sqlite cache let db_nonce_opt = db_get_nonce(mempool_db, address).unwrap_or_else(|e| { @@ -79,7 +95,14 @@ impl NonceCache { if let Some(db_nonce) = db_nonce_opt { // Insert into in-memory cache, but it is not dirty, // since we just got it from the database. - let evicted = self.cache.insert_clean(address.clone(), db_nonce); + let evicted = match self.cache.insert_clean(address.clone(), db_nonce) { + Ok(evicted) => evicted, + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(mempool_db); + None + } + }; if evicted.is_some() { // If we evicted something, we need to flush the cache. self.flush_with_evicted(mempool_db, evicted); @@ -97,7 +120,14 @@ impl NonceCache { /// Set the nonce for `address` to `value` in the in-memory cache. /// If this causes an eviction, flush the in-memory cache to the DB. pub fn set(&mut self, address: StacksAddress, value: u64, conn: &mut DBConn) { - let evicted = self.cache.insert(address.clone(), value); + let evicted = match self.cache.insert(address.clone(), value) { + Ok(evicted) => evicted, + Err(_) => { + // The cache is corrupt, reset it + self.reset_cache(conn); + Some((address, value)) + } + }; if evicted.is_some() { // If we evicted something, we need to flush the cache. 
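            // Note: `insert` reports only *dirty* evictions. A clean entry was
            // loaded from the DB via `insert_clean`, so dropping it loses
            // nothing; a dirty entry holds the only unpersisted copy of a
            // nonce, which is why an eviction forces a flush here.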
self.flush_with_evicted(conn, evicted); @@ -147,10 +177,18 @@ impl NonceCache { tx.execute(sql, params![addr, nonce])?; } - self.cache.flush(|addr, nonce| { + match self.cache.flush(|addr, nonce| { tx.execute(sql, params![addr, nonce])?; Ok::<(), db_error>(()) - })?; + }) { + Ok(inner) => inner?, + Err(_) => { + drop(tx); + // The cache is corrupt, reset it and return + self.reset_cache(conn); + return Ok(()); + } + }; tx.commit()?; From e6775d1545c041d7a897f1a9a96932d45cbdbecd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 10:53:22 -0400 Subject: [PATCH 201/238] feat: add key check for cache safety --- stacks-common/src/util/lru_cache.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index b031bcdfb0f..27a1312b049 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -120,6 +120,11 @@ impl LruCache { } let node = self.order.get(*order_idx).ok_or(())?; + // Safety check: if the key doesn't match, the cache is corrupted + if node.key != *key { + return Err(()); + } + Ok(Some(node.value)) } else { Ok(None) From 2b79d703a8f49b746c4914c84b54febf5e36dafa Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 10:53:44 -0400 Subject: [PATCH 202/238] test: update proptests for fallible operations --- stacks-common/src/util/lru_cache.rs | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 27a1312b049..c590cd2cd66 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -318,7 +318,8 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .unwrap(); + .expect("cache corrupted") + .expect("flush failed"); assert_eq!(flushed, vec![(1, 1)]); @@ -331,7 +332,8 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .unwrap(); + .expect("cache corrupted") + .expect("flush failed"); assert_eq!(flushed, vec![(2, 2), (1, 3)]); } @@ -351,7 +353,8 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .unwrap(); + .expect("cache corrupted") + .expect("flush failed"); assert_eq!(flushed, [(3, 3), (2, 2)]); } @@ -390,10 +393,10 @@ mod property_tests { let mut cache = LruCache::new(10); for op in ops { match op { - CacheOp::Insert(v) => { cache.insert(v, v); } - CacheOp::Get(v) => { cache.get(&v); } - CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } - CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } + CacheOp::Insert(v) => { cache.insert(v, v).expect("cache corrupted"); } + CacheOp::Get(v) => { cache.get(&v).expect("cache corrupted"); } + CacheOp::InsertClean(v) => { cache.insert_clean(v, v).expect("cache corrupted"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted").expect("flush failed"); } } } } @@ -403,7 +406,7 @@ mod property_tests { let capacity = 10; let mut cache = LruCache::new(capacity); for op in ops { - cache.insert(op, op); + cache.insert(op, op).expect("cache corrupted"); prop_assert!(cache.cache.len() <= capacity); prop_assert!(cache.order.len() <= capacity); } @@ -414,10 +417,10 @@ mod property_tests { let mut cache = LruCache::new(10); for op in ops { match op { - CacheOp::Insert(v) => { cache.insert(v, v); } - CacheOp::Get(v) => { cache.get(&v); } - CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } - CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } + CacheOp::Insert(v) => { 
cache.insert(v, v).expect("cache corrupted"); } + CacheOp::Get(v) => { cache.get(&v).expect("cache corrupted"); } + CacheOp::InsertClean(v) => { cache.insert_clean(v, v).expect("cache corrupted"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted").expect("flush failed"); } } // Verify linked list integrity if !cache.order.is_empty() { From 1deecd01085c8c6488cc4ac557785a43c316f69b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 25 Mar 2025 16:21:42 +0100 Subject: [PATCH 203/238] refactored testing --- testnet/stacks-node/src/tests/signer/v0.rs | 69 ++++++++++++---------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 123edc2db36..1ce6fc8bf15 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -759,20 +759,22 @@ impl MultipleMinerTest { Ok(txid) } - pub fn send_contract_publish(&mut self, contract_name: &str, contract_src: &str) -> String { - let http_origin = format!( - "http://{}", - &self.signer_test.running_nodes.conf.node.rpc_bind - ); + pub fn send_contract_publish( + &mut self, + sender_nonce: u64, + contract_name: &str, + contract_src: &str, + ) -> String { + let http_origin = self.node_http(); + let sender_addr = tests::to_addr(&self.sender_sk); let contract_tx = make_contract_publish( &self.sender_sk, - self.sender_nonce, + sender_nonce, self.send_fee + contract_name.len() as u64 + contract_src.len() as u64, self.signer_test.running_nodes.conf.burnchain.chain_id, contract_name, contract_src, ); - self.sender_nonce += 1; submit_tx(&http_origin, &contract_tx) } @@ -780,18 +782,20 @@ impl MultipleMinerTest { /// Returns the txid of the transfer tx. 
pub fn send_and_mine_contract_publish( &mut self, + sender_nonce: u64, contract_name: &str, contract_src: &str, timeout_secs: u64, ) -> Result { let stacks_height_before = self.get_peer_stacks_tip_height(); - let txid = self.send_contract_publish(contract_name, contract_src); + let txid = self.send_contract_publish(sender_nonce, contract_name, contract_src); // wait for the new block to be mined wait_for(timeout_secs, || { Ok(self.get_peer_stacks_tip_height() > stacks_height_before) - })?; + }) + .unwrap(); // wait for the observer to see it self.wait_for_test_observer_blocks(timeout_secs); @@ -805,17 +809,15 @@ impl MultipleMinerTest { pub fn send_contract_call( &mut self, + sender_nonce: u64, contract_name: &str, function_name: &str, function_args: &[clarity::vm::Value], ) -> String { - let http_origin = format!( - "http://{}", - &self.signer_test.running_nodes.conf.node.rpc_bind - ); + let http_origin = self.node_http(); let contract_tx = make_contract_call( &self.sender_sk, - self.sender_nonce, + sender_nonce, self.send_fee, self.signer_test.running_nodes.conf.burnchain.chain_id, &tests::to_addr(&self.sender_sk), @@ -823,7 +825,6 @@ impl MultipleMinerTest { function_name, function_args, ); - self.sender_nonce += 1; submit_tx(&http_origin, &contract_tx) } @@ -8232,7 +8233,7 @@ fn block_proposal_max_age_rejections() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let info_before = get_chain_info(&signer_test.running_nodes.conf); + let _ = get_chain_info(&signer_test.running_nodes.conf); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], @@ -8301,7 +8302,9 @@ fn block_proposal_max_age_rejections() { .unwrap_or((0, 0)); assert_eq!(block_2_status.1, 0, "Block 2 should always be rejected"); - info!("Block 2 status"; "accepted" => block_2_status.1, "rejected" => block_2_status.0); + info!("Block 2 status"; + "accepted" => %block_2_status.1, "rejected" => %block_2_status.0 + ); Ok(block_2_status.0 > num_signers * 7 / 10) }) .expect("Timed out waiting for block rejections"); @@ -12474,19 +12477,25 @@ fn miner_rejection_by_contract_call_execution_time_expired() { // First, lets deploy the contract let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))"; + let sender_nonce = 0; + let _ = miners - .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .send_and_mine_contract_publish(sender_nonce, "dummy-contract", dummy_contract_src, 60) .expect("Failed to publish contract in a new block"); info!("------------------------- Miner 1 Mines a Nakamoto Block N+1 -------------------------"); let stacks_height_before = miners.get_peer_stacks_tip_height(); - let tx1 = miners.send_transfer_tx(); + let (tx1, sender_nonce) = miners.send_transfer_tx(); // try calling the contract (has to fail) - let contract_call_txid = - miners.send_contract_call("dummy-contract", "dummy", &[clarity::vm::Value::UInt(1)]); + let contract_call_txid = miners.send_contract_call( + sender_nonce + 1, + "dummy-contract", + "dummy", + &[clarity::vm::Value::UInt(1)], + ); let _ = wait_for(60, || { Ok(miners.get_peer_stacks_tip_height() > stacks_height_before) @@ -12499,8 +12508,6 @@ fn miner_rejection_by_contract_call_execution_time_expired() { info!("------------------------- Miner 1 Mines a Nakamoto Block N+2 -------------------------"); - miners.sender_nonce -= 1; - let tx2 = miners .send_and_mine_transfer_tx(60) .expect("Failed to mine N + 2"); @@ 
-12523,8 +12530,12 @@ fn miner_rejection_by_contract_call_execution_time_expired() { let stacks_height_before = miners.get_peer_stacks_tip_height(); - let contract_call_txid = - miners.send_contract_call("dummy-contract", "dummy", &[clarity::vm::Value::UInt(1)]); + let contract_call_txid = miners.send_contract_call( + sender_nonce + 2, + "dummy-contract", + "dummy", + &[clarity::vm::Value::UInt(1)], + ); let _ = wait_for_block_pushed_by_miner_key(30, stacks_height_before + 1, &miner_pk_2) .expect("Failed to get block N+3"); @@ -12607,10 +12618,10 @@ fn miner_rejection_by_contract_publish_execution_time_expired() { let dummy_contract_src = "(define-public (dummy (number uint)) (begin (ok (+ number u1))))(+ 1 1)"; - let tx1 = miners.send_transfer_tx(); + let (tx1, sender_nonce) = miners.send_transfer_tx(); let _ = miners - .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .send_and_mine_contract_publish(sender_nonce + 1, "dummy-contract", dummy_contract_src, 60) .expect_err("Expected an error while publishing contract in a new block"); assert_eq!(last_block_contains_txid(&tx1), true); @@ -12627,10 +12638,8 @@ fn miner_rejection_by_contract_publish_execution_time_expired() { info!("------------------------- Miner 2 Mines Block N+1 -------------------------"); - miners.sender_nonce -= 1; - let _ = miners - .send_and_mine_contract_publish("dummy-contract", dummy_contract_src, 60) + .send_and_mine_contract_publish(sender_nonce + 1, "dummy-contract", dummy_contract_src, 60) .expect("Failed to publish contract in a new block"); verify_sortition_winner(&sortdb, &miner_pkh_2); From 4831d47031ea6742ee4f503babea38f3c7d72f0a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 15:21:38 -0400 Subject: [PATCH 204/238] test: add LRU proptest --- stacks-common/src/util/lru_cache.rs | 74 ++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 7c49cbf7e2e..254660cc3dc 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -304,12 +304,46 @@ mod tests { assert_eq!(flushed, [(3, 3), (2, 2)]); } + + pub struct SimpleLRU { + pub cache: Vec, + capacity: usize, + } + + impl SimpleLRU { + pub fn new(capacity: usize) -> Self { + SimpleLRU { + cache: Vec::with_capacity(capacity), + capacity, + } + } + + pub fn insert(&mut self, key: u32) { + if let Some(pos) = self.cache.iter().position(|&x| x == key) { + self.cache.remove(pos); + } else if self.cache.len() == self.capacity { + self.cache.remove(0); + } + self.cache.push(key); + } + + pub fn get(&mut self, key: u32) -> Option { + if let Some(pos) = self.cache.iter().position(|&x| x == key) { + self.cache.remove(pos); + self.cache.push(key); + Some(key) + } else { + None + } + } + } } #[cfg(test)] mod property_tests { use proptest::prelude::*; + use super::tests::SimpleLRU; use super::*; #[derive(Debug, Clone)] @@ -359,7 +393,7 @@ mod property_tests { } #[test] - fn maintains_lru_order(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + fn maintains_linked_list_integrity(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { let mut cache = LruCache::new(10); for op in ops { match op { @@ -385,5 +419,43 @@ mod property_tests { } } } + + #[test] + fn maintains_lru_correctness(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { + let mut cache = LruCache::new(5); + let mut simple = SimpleLRU::new(5); + for op in ops { + match op { + CacheOp::Insert(v) => { + 
cache.insert(v, v); + simple.insert(v); + } + CacheOp::Get(v) => { + let actual = cache.get(&v); + let expected = simple.get(v); + prop_assert_eq!(actual, expected); + } + CacheOp::InsertClean(v) => { + cache.insert_clean(v, v); + simple.insert(v); + } + CacheOp::Flush => cache.flush(|_, _| Ok::<(), ()>(())).unwrap(), + }; + + // The cache should have the same order as the simple LRU + let mut curr = cache.head; + let mut count = 0; + while curr != cache.capacity { + if count >= cache.order.len() { + prop_assert!(false, "Linked list cycle detected"); + } + let idx = simple.cache.len() - count - 1; + prop_assert_eq!(cache.order[curr].key, simple.cache[idx]); + prop_assert_eq!(cache.order[curr].value, simple.cache[idx]); + curr = cache.order[curr].next; + count += 1; + } + } + } } } From 0fcc36133334bd13ad648ba28080998fbe4ef54e Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 15:49:16 -0400 Subject: [PATCH 205/238] test: improve proptests for `LruCache` --- stacks-common/src/util/lru_cache.rs | 94 +++++++++++++++++++---------- 1 file changed, 62 insertions(+), 32 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 254660cc3dc..2a9d081c6ab 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -305,8 +305,9 @@ mod tests { assert_eq!(flushed, [(3, 3), (2, 2)]); } + /// Simple LRU implementation for testing pub struct SimpleLRU { - pub cache: Vec, + pub cache: Vec>, capacity: usize, } @@ -318,24 +319,41 @@ mod tests { } } - pub fn insert(&mut self, key: u32) { - if let Some(pos) = self.cache.iter().position(|&x| x == key) { + pub fn insert(&mut self, key: u32, value: u32, dirty: bool) { + if let Some(pos) = self.cache.iter().position(|x| x.key == key) { self.cache.remove(pos); } else if self.cache.len() == self.capacity { self.cache.remove(0); } - self.cache.push(key); + self.cache.push(Node { + key, + value, + dirty, + next: 0, + prev: 0, + }); } pub fn get(&mut self, key: u32) -> Option { - if let Some(pos) = self.cache.iter().position(|&x| x == key) { - self.cache.remove(pos); - self.cache.push(key); - Some(key) + if let Some(pos) = self.cache.iter().position(|x| x.key == key) { + let node = self.cache.remove(pos); + let value = node.value; + self.cache.push(node); + Some(value) } else { None } } + + pub fn flush(&mut self, mut f: impl FnMut(&u32, u32) -> Result<(), E>) -> Result<(), E> { + for node in self.cache.iter_mut().rev() { + if node.dirty { + f(&node.key, node.value)?; + } + node.dirty = false; + } + Ok(()) + } } } @@ -348,18 +366,18 @@ mod property_tests { #[derive(Debug, Clone)] enum CacheOp { - Insert(u32), + Insert(u32, u32), Get(u32), - InsertClean(u32), + InsertClean(u32, u32), Flush, } prop_compose! 
{ - fn arbitrary_op()(op_type in 0..4, value in 0..100u32) -> CacheOp { + fn arbitrary_op()(op_type in 0..4, key in 0..100u32, value in 0..1000u32) -> CacheOp { match op_type { - 0 => CacheOp::Insert(value), - 1 => CacheOp::Get(value), - 2 => CacheOp::InsertClean(value), + 0 => CacheOp::Insert(key, value), + 1 => CacheOp::Get(key), + 2 => CacheOp::InsertClean(key, value), _ => CacheOp::Flush, } } @@ -373,9 +391,9 @@ mod property_tests { let mut cache = LruCache::new(10); for op in ops { match op { - CacheOp::Insert(v) => { cache.insert(v, v); } - CacheOp::Get(v) => { cache.get(&v); } - CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } + CacheOp::Insert(k, v) => { cache.insert(k, v); } + CacheOp::Get(k) => { cache.get(&k); } + CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v); } CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } } } @@ -397,9 +415,9 @@ mod property_tests { let mut cache = LruCache::new(10); for op in ops { match op { - CacheOp::Insert(v) => { cache.insert(v, v); } - CacheOp::Get(v) => { cache.get(&v); } - CacheOp::InsertClean(v) => { cache.insert_clean(v, v); } + CacheOp::Insert(k, v) => { cache.insert(k, v); } + CacheOp::Get(k) => { cache.get(&k); } + CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v); } CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).unwrap(); } } // Verify linked list integrity @@ -426,20 +444,32 @@ mod property_tests { let mut simple = SimpleLRU::new(5); for op in ops { match op { - CacheOp::Insert(v) => { - cache.insert(v, v); - simple.insert(v); + CacheOp::Insert(k, v) => { + cache.insert(k, v); + simple.insert(k, v, true); } - CacheOp::Get(v) => { - let actual = cache.get(&v); - let expected = simple.get(v); + CacheOp::Get(k) => { + let actual = cache.get(&k); + let expected = simple.get(k); prop_assert_eq!(actual, expected); } - CacheOp::InsertClean(v) => { - cache.insert_clean(v, v); - simple.insert(v); + CacheOp::InsertClean(k, v) => { + cache.insert_clean(k, v); + simple.insert(k, v, false); + } + CacheOp::Flush => { + let mut flushed = vec![]; + let mut simple_flushed = vec![]; + cache.flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }).unwrap(); + simple.flush(|k, v| { + simple_flushed.push((*k, v)); + Ok::<(), ()>(()) + }).unwrap(); + prop_assert_eq!(flushed, simple_flushed); } - CacheOp::Flush => cache.flush(|_, _| Ok::<(), ()>(())).unwrap(), }; // The cache should have the same order as the simple LRU @@ -450,8 +480,8 @@ mod property_tests { prop_assert!(false, "Linked list cycle detected"); } let idx = simple.cache.len() - count - 1; - prop_assert_eq!(cache.order[curr].key, simple.cache[idx]); - prop_assert_eq!(cache.order[curr].value, simple.cache[idx]); + prop_assert_eq!(cache.order[curr].key, simple.cache[idx].key); + prop_assert_eq!(cache.order[curr].value, simple.cache[idx].value); curr = cache.order[curr].next; count += 1; } From fc8adfc193edc5e3dd51991a9adcc36cf8ce389b Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 17:38:00 -0400 Subject: [PATCH 206/238] refactor: reorganize the `LruCache` implementation Hopefully this makes it easier to follow and removes some duplication. 
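As context for the refactor below: the cache keeps its recency order as a
doubly linked list threaded through a `Vec`, using indices instead of
pointers, with the (otherwise unreachable) index `capacity` as the null
sentinel. A minimal standalone sketch of that representation (illustrative
only, not the code in this patch):

    struct Link {
        prev: usize, // `cap` means "no previous node"
        next: usize, // `cap` means "no next node"
    }

    struct IndexList {
        links: Vec<Link>, // never grows past `cap` entries
        head: usize,      // `cap` when the list is empty
        tail: usize,      // `cap` when the list is empty
        cap: usize,
    }

Because `links.len() <= cap` always holds, `cap` can never collide with a
real slot, which is why the traversal loops in this file terminate on
`curr == capacity`.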
--- stacks-common/src/util/lru_cache.rs | 158 +++++++++++++++++----------- 1 file changed, 94 insertions(+), 64 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 2a9d081c6ab..46fe689b621 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -83,30 +83,9 @@ impl LruCache { /// Get the value for the given key pub fn get(&mut self, key: &K) -> Option { - if let Some(node) = self.cache.get(key) { - // Move the node to the head of the LRU list - let node = *node; - - if node != self.head { - let prev = self.order[node].prev; - let next = self.order[node].next; - - if node == self.tail { - // If this is the tail, update the tail - self.tail = prev; - } else { - // Else, update the next node's prev pointer - self.order[next].prev = prev; - } - - self.order[prev].next = next; - self.order[node].prev = self.capacity; - self.order[node].next = self.head; - self.order[self.head].prev = node; - self.head = node; - } - - Some(self.order[node].value) + if let Some(&index) = self.cache.get(key) { + self.move_to_head(index); + Some(self.order[index].value) } else { None } @@ -127,75 +106,60 @@ impl LruCache { /// Insert a key-value pair into the cache /// Returns `Some((K, V))` if a dirty value was evicted. pub fn insert_with_dirty(&mut self, key: K, value: V, dirty: bool) -> Option<(K, V)> { - let mut evicted = None; - if let Some(node) = self.cache.get(&key) { - // Update the value for the key - let node = *node; - self.order[node].value = value; - self.order[node].dirty = dirty; - - // Just call get to handle updating the LRU list - self.get(&key); + if let Some(&index) = self.cache.get(&key) { + // Update an existing node + self.order[index].value = value; + self.order[index].dirty = dirty; + self.move_to_head(index); + None } else { + let mut evicted = None; + // This is a new key let index = if self.cache.len() == self.capacity { - // Take the place of the least recently used element. - // First, remove it from the tail of the LRU list - let index = self.tail; - let prev = self.order[index].prev; - self.order[prev].next = self.capacity; - self.tail = prev; - - // Remove it from the cache - self.cache.remove(&self.order[index].key); + // We've reached capacity. 
Evict the least-recently used value + // and reuse its node + let index = self.evict_lru(); // Replace the key with the new key, saving the old key let replaced_key = std::mem::replace(&mut self.order[index].key, key.clone()); - // If it is dirty, save the key-value pair to return + // Save the evicted key-value pair, if it was dirty if self.order[index].dirty { evicted = Some((replaced_key, self.order[index].value)); - } - - // Insert this new value into the cache - self.cache.insert(key, index); + }; - // Update the node with the new key-value pair, inserting it at - // the head of the LRU list + // Update the evicted node with the new key-value pair self.order[index].value = value; self.order[index].dirty = dirty; - self.order[index].next = self.head; - self.order[index].prev = self.capacity; + + // Insert the new key-value pair into the cache + self.cache.insert(key.clone(), index); index } else { - // Insert a new key-value pair + // Create a new node, add it to the cache + let index = self.order.len(); let node = Node { key: key.clone(), value, dirty, - next: self.head, + next: self.capacity, prev: self.capacity, }; - - let index = self.order.len(); self.order.push(node); self.cache.insert(key, index); - index }; - // Put it at the head of the LRU list - if self.head != self.capacity { - self.order[self.head].prev = index; - } else { - self.tail = index; - } + // Put the new or reused node at the head of the LRU list + self.attach_as_head(index); - self.head = index; + evicted } - evicted } + /// Flush all dirty values in the cache, calling the given function, `f`, + /// for each dirty value. pub fn flush(&mut self, mut f: impl FnMut(&K, V) -> Result<(), E>) -> Result<(), E> { let mut index = self.head; while index != self.capacity { @@ -209,6 +173,72 @@ impl LruCache { } Ok(()) } + + /// Helper function to remove a node from the linked list (by index) + fn detach_node(&mut self, index: usize) { + if index >= self.order.len() { + return; + } + + let prev = self.order[index].prev; + let next = self.order[index].next; + + if index == self.tail { + // If this is the last node, update the tail to point to its previous node + self.tail = prev; + } else { + // Else, update the next node to point to the previous node + self.order[next].prev = prev; + } + + if index == self.head { + // If this is the first node, update the head to point to the next node + self.head = next; + } else { + // Else, update the previous node to point to the next node + self.order[prev].next = next; + } + } + + /// Helper function to attach a node as the head of the linked list + fn attach_as_head(&mut self, index: usize) { + self.order[index].prev = self.capacity; + self.order[index].next = self.head; + + if self.head != self.capacity { + // If there is a head, update its previous pointer to this one + self.order[self.head].prev = index; + } else { + // Else, the list was empty, so update the tail + self.tail = index; + } + self.head = index; + } + + /// Helper function to move a node to the head of the linked list + fn move_to_head(&mut self, index: usize) { + if index == self.head { + // If the node is already the head, do nothing + return; + } + + self.detach_node(index); + self.attach_as_head(index); + } + + /// Helper function to evict the least-recently used node, which is the + /// tail of the linked list + /// Returns the index of the evicted node + fn evict_lru(&mut self) -> usize { + let index = self.tail; + if index == self.capacity { + // If the list is empty, do nothing + return self.capacity; + } + 
self.detach_node(index); + self.cache.remove(&self.order[index].key); + index + } } #[cfg(test)] From d7df7c99417be4b00f85d1f27055f158886df96f Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 17:41:48 -0400 Subject: [PATCH 207/238] refactor: move core::util to core::test_util --- stackslib/src/core/mod.rs | 4 ++-- stackslib/src/core/{util.rs => test_util.rs} | 0 stackslib/src/core/tests/mod.rs | 2 +- stackslib/src/net/tests/download/nakamoto.rs | 2 +- stackslib/src/net/tests/inv/nakamoto.rs | 2 +- stackslib/src/net/tests/mempool/mod.rs | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) rename stackslib/src/core/{util.rs => test_util.rs} (100%) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 5d6720d2384..30e042e510b 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -32,10 +32,10 @@ use crate::chainstate::burn::ConsensusHash; pub mod mempool; pub mod nonce_cache; +#[cfg(any(test, feature = "testing"))] +pub mod test_util; #[cfg(test)] pub mod tests; -#[cfg(any(test, feature = "testing"))] -pub mod util; use std::cmp::Ordering; pub type StacksEpoch = GenericStacksEpoch; diff --git a/stackslib/src/core/util.rs b/stackslib/src/core/test_util.rs similarity index 100% rename from stackslib/src/core/util.rs rename to stackslib/src/core/test_util.rs diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 120acb478fd..00010874ca2 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -65,7 +65,7 @@ use crate::core::mempool::{ db_get_all_nonces, MemPoolSyncData, MemPoolWalkSettings, MemPoolWalkTxTypes, TxTag, BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS, }; -use crate::core::util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; +use crate::core::test_util::{insert_tx_in_mempool, make_stacks_transfer, to_addr}; use crate::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::net::Error as NetError; use crate::util_lib::bloom::test::setup_bloom_counter; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index c71d46cb135..9bd62705663 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -43,7 +43,7 @@ use crate::chainstate::stacks::{ TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; -use crate::core::util::to_addr; +use crate::core::test_util::to_addr; use crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; use crate::net::inv::nakamoto::NakamotoTenureInv; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index c8248ef4524..ce94e4865aa 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -43,7 +43,7 @@ use crate::chainstate::stacks::{ TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; -use crate::core::util::to_addr; +use crate::core::test_util::to_addr; use crate::core::StacksEpochExtension; use crate::net::inv::nakamoto::{InvGenerator, NakamotoInvStateMachine, NakamotoTenureInv}; use crate::net::neighbors::comms::NeighborComms; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 81dc0cd43c7..f6b38f42de5 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ 
b/stackslib/src/net/tests/mempool/mod.rs @@ -32,7 +32,7 @@ use crate::burnchains::*; use crate::chainstate::nakamoto::coordinator::tests::make_token_transfer; use crate::chainstate::stacks::test::*; use crate::chainstate::stacks::*; -use crate::core::util::to_addr; +use crate::core::test_util::to_addr; use crate::core::StacksEpochExtension; use crate::net::atlas::*; use crate::net::codec::*; From 9756fb04a8dc1b70dcf2a7772d3bb5c52477185c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 17:43:53 -0400 Subject: [PATCH 208/238] feat: simplify `to_addr` implementation --- stackslib/src/core/test_util.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/stackslib/src/core/test_util.rs b/stackslib/src/core/test_util.rs index d49a7a59229..519d6600135 100644 --- a/stackslib/src/core/test_util.rs +++ b/stackslib/src/core/test_util.rs @@ -266,13 +266,7 @@ pub fn make_contract_publish_microblock_only( } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { - StacksAddress::from_public_keys( - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, - &AddressHashMode::SerializeP2PKH, - 1, - &vec![StacksPublicKey::from_private(sk)], - ) - .unwrap() + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sk)) } pub fn make_stacks_transfer( From 2f7cdb28e1c60a63a9da3e1cd411574a0d3d8a2c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 17:48:05 -0400 Subject: [PATCH 209/238] refactor: finish rename of core::util to core::test_util --- testnet/stacks-node/src/tests/epoch_205.rs | 2 +- testnet/stacks-node/src/tests/epoch_21.rs | 2 +- testnet/stacks-node/src/tests/epoch_22.rs | 2 +- testnet/stacks-node/src/tests/epoch_23.rs | 2 +- testnet/stacks-node/src/tests/epoch_24.rs | 2 +- testnet/stacks-node/src/tests/epoch_25.rs | 2 +- testnet/stacks-node/src/tests/integrations.rs | 2 +- testnet/stacks-node/src/tests/mempool.rs | 2 +- testnet/stacks-node/src/tests/mod.rs | 2 +- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- testnet/stacks-node/src/tests/neon_integrations.rs | 2 +- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0dbb548461c..7cd5109a97b 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -14,7 +14,7 @@ use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; use stacks::config::{EventKeyType, InitialBalance}; -use stacks::core::util::{ +use stacks::core::test_util::{ make_contract_call, make_contract_call_mblock_only, make_contract_publish, make_contract_publish_microblock_only, to_addr, }; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 3d85c0e9099..83218866dd6 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -25,7 +25,7 @@ use stacks::chainstate::stacks::miner::{ use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{Config, InitialBalance}; -use stacks::core::util::make_contract_call; +use stacks::core::test_util::make_contract_call; use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 
f1ef3c4dc45..e9bdeb70222 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -9,7 +9,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::{EventKeyType, EventObserverConfig, InitialBalance}; -use stacks::core::util::{make_contract_call, make_stacks_transfer}; +use stacks::core::test_util::{make_contract_call, make_stacks_transfer}; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 1c6c19e970d..a003e8033f7 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -20,7 +20,7 @@ use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::Value; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; -use stacks::core::util::make_contract_call; +use stacks::core::test_util::make_contract_call; use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index fc1fc1a64e3..82b5c5f45e1 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -27,7 +27,7 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; use stacks::config::InitialBalance; -use stacks::core::util::{make_contract_call, to_addr}; +use stacks::core::test_util::{make_contract_call, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 1a1ef463f18..45a32b8924c 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -18,7 +18,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; use stacks::config::InitialBalance; -use stacks::core::util::{make_stacks_transfer_mblock_only, to_addr}; +use stacks::core::test_util::{make_stacks_transfer_mblock_only, to_addr}; use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index dbfd48307eb..0371a088c6b 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -26,7 +26,7 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::config::InitialBalance; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; -use stacks::core::util::{ +use stacks::core::test_util::{ make_contract_call, make_contract_publish, make_sponsored_stacks_transfer_on_testnet, make_stacks_transfer, to_addr, }; diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 
b60a0041162..4268daab08d 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -15,7 +15,7 @@ use stacks::chainstate::stacks::{ }; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; -use stacks::core::util::{ +use stacks::core::test_util::{ make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, sign_standard_single_sig_tx_anchor_mode_version, to_addr, }; diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 702c7244daa..5dc5e7d2965 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -28,7 +28,7 @@ use stacks::chainstate::stacks::{ StacksPrivateKey, StacksPublicKey, StacksTransaction, TransactionPayload, }; #[cfg(any(test, feature = "testing"))] -use stacks::core::util::{make_contract_publish, to_addr}; +use stacks::core::test_util::{make_contract_publish, to_addr}; use stacks::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, CHAIN_ID_TESTNET}; use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index c249234aed0..cbc9dcf27da 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -63,7 +63,7 @@ use stacks::chainstate::stacks::{ }; use stacks::config::{EventKeyType, InitialBalance}; use stacks::core::mempool::{MemPoolWalkStrategy, MAXIMUM_MEMPOOL_TX_CHAINING}; -use stacks::core::util::{ +use stacks::core::test_util::{ insert_tx_in_mempool, make_contract_call, make_contract_publish_versioned, make_stacks_transfer, }; use stacks::core::{ diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index e805df59cdd..1d5b6963d5a 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -42,7 +42,7 @@ use stacks::cli; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig, FeeEstimatorName, InitialBalance}; use stacks::core::mempool::MemPoolWalkTxTypes; -use stacks::core::util::{ +use stacks::core::test_util::{ make_contract_call, make_contract_publish, make_contract_publish_microblock_only, make_microblock, make_stacks_transfer, make_stacks_transfer_mblock_only, to_addr, }; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3ac0443edce..b9ae8d1cfb2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -45,7 +45,7 @@ use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, Transacti use stacks::codec::StacksMessageCodec; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig}; use stacks::core::mempool::MemPoolWalkStrategy; -use stacks::core::util::{ +use stacks::core::test_util::{ insert_tx_in_mempool, make_contract_call, make_contract_publish, make_stacks_transfer, }; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; From 0db41151ffca3078b4364de5799456ee7ae0d12a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 17:52:08 -0400 Subject: [PATCH 210/238] chore: minor changes from review --- stacks-common/src/util/lru_cache.rs | 2 +- stackslib/src/chainstate/stacks/tests/block_construction.rs | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 46fe689b621..77ecb64a985 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -40,7 +40,7 @@ impl Display for Node { } } -/// LRU cache for account nonces +/// LRU cache pub struct LruCache { capacity: usize, /// Map from address to an offset in the linked list diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 67980fbfd05..aabad63b33d 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5031,7 +5031,7 @@ fn paramaterized_mempool_walk_test( #[test] /// Test that the mempool walk query ignores old nonces and prefers next possible nonces before higher global fees. fn mempool_walk_test_next_nonce_with_highest_fee_rate_strategy() { - let key_address_pairs: Vec<(Secp256k1PrivateKey, StacksAddress)> = (0..7) + let key_address_pairs: Vec<_> = (0..7) .map(|_user_index| { let privk = StacksPrivateKey::random(); let addr = StacksAddress::from_public_keys( From 30a68d8d1c56d26f56a55406244df0e2da83a300 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 25 Mar 2025 18:06:37 -0400 Subject: [PATCH 211/238] docs: add more comments to mempool iteration algorithm --- stackslib/src/core/mempool.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 06bdcd9eb8d..675a8ac14eb 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1616,6 +1616,15 @@ impl MemPoolDB { .query(NO_PARAMS) .map_err(Error::SqliteError)?; + // Here we have a nested loop to walk the mempool. + // + // The `GlobalFeeRate` strategy includes all transactions, so we just + // query once and walk the full mempool in the inner loop. + // + // The `NextNonceWithHighestFeeRate` strategy only selects transactions + // that have the next expected nonce, so we need to re-query the + // mempool after one batch has been processed and the nonce table has + // been updated. This is handled in the outer loop. let stop_reason = loop { let mut state_changed = false; @@ -1902,7 +1911,11 @@ impl MemPoolDB { }; // If we've reached the end of the mempool, or if we've stopped - // iterating for some other reason, break out of the loop + // iterating for some other reason, break out of the loop. In the + // case of `NextNonceWithHighestFeeRate` we know we've reached the + // end of the mempool if the state has not changed. In the case of + // `GlobalFeeRate` we know we've reached the end of the mempool if + // the stop reason is `NoMoreCandidates`. 
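+            // In sketch form, the two nested loops behave as follows
+            // (`walk_candidates`, `nonces_changed`, and `requery_mempool` are
+            // descriptive placeholders, not functions in this file):
+            //
+            //     loop {                                   // outer: one pass per nonce-table state
+            //         let stop_reason = walk_candidates(); // inner loop above
+            //         if strategy != NextNonceWithHighestFeeRate { break }
+            //         if stop_reason != NoMoreCandidates { break }
+            //         if !nonces_changed { break }
+            //         requery_mempool(); // ranks may shift now that nonces advanced
+            //     }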
if settings.strategy != MemPoolWalkStrategy::NextNonceWithHighestFeeRate || stop_reason != MempoolIterationStopReason::NoMoreCandidates || !state_changed From c9d02bc71f889f559c8488d894455776f9350c78 Mon Sep 17 00:00:00 2001 From: Simone Orsi Date: Tue, 25 Mar 2025 23:09:05 +0100 Subject: [PATCH 212/238] chore: remove sip folder and update GitHub URL references - Remove the sip/ folder from the stacks-core repository as the official SIP documentation is now maintained at https://github.com/stacksgov/sips - Update GitHub URL reference from blockstack to stacksgov in schema definitions to point to the new location Closes #5950 --- .../entities/contracts/read-only-function-args.schema.json | 2 +- sip/README.md | 5 ----- sip/sip-000-stacks-improvement-proposal-process.md | 5 ----- sip/sip-001-burn-election.md | 5 ----- sip/sip-002-smart-contract-language.md | 5 ----- sip/sip-003-peer-network.md | 5 ----- sip/sip-004-materialized-view.md | 5 ----- sip/sip-005-blocks-and-transactions.md | 5 ----- sip/sip-006-runtime-cost-assessment.md | 5 ----- sip/sip-007-stacking-consensus.md | 5 ----- sip/sip-008-analysis-cost-assessment.md | 5 ----- 11 files changed, 1 insertion(+), 51 deletions(-) delete mode 100644 sip/README.md delete mode 100644 sip/sip-000-stacks-improvement-proposal-process.md delete mode 100644 sip/sip-001-burn-election.md delete mode 100644 sip/sip-002-smart-contract-language.md delete mode 100644 sip/sip-003-peer-network.md delete mode 100644 sip/sip-004-materialized-view.md delete mode 100644 sip/sip-005-blocks-and-transactions.md delete mode 100644 sip/sip-006-runtime-cost-assessment.md delete mode 100644 sip/sip-007-stacking-consensus.md delete mode 100644 sip/sip-008-analysis-cost-assessment.md diff --git a/docs/rpc/entities/contracts/read-only-function-args.schema.json b/docs/rpc/entities/contracts/read-only-function-args.schema.json index ff457fe0319..7d497337561 100644 --- a/docs/rpc/entities/contracts/read-only-function-args.schema.json +++ b/docs/rpc/entities/contracts/read-only-function-args.schema.json @@ -1,7 +1,7 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "title": "ReadOnlyFunctionArgs", - "description": "Describes representation of a Type-0 Stacks 2.0 transaction. https://github.com/blockstack/stacks-blockchain/blob/master/sip/sip-005-blocks-and-transactions.md#type-0-transferring-an-asset", + "description": "Describes representation of a Type-0 Stacks 2.0 transaction. https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md#type-0-transferring-an-asset", "type": "object", "required": ["sender", "arguments"], "properties": { diff --git a/sip/README.md b/sip/README.md deleted file mode 100644 index bce5ef7a907..00000000000 --- a/sip/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Stacks Improvement Proposals (SIPs) - -This directory formerly contained all of the in-progress Stacks Improvement Proposals before the Stacks 2.0 mainnet launched. - -The SIPs are now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-000-stacks-improvement-proposal-process.md b/sip/sip-000-stacks-improvement-proposal-process.md deleted file mode 100644 index 987c9ca6cfb..00000000000 --- a/sip/sip-000-stacks-improvement-proposal-process.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-000 Stacks Improvement Proposal Process - -This document formerly contained SIP-000 before the Stacks 2.0 mainnet launched. 
- -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-000/sip-000-stacks-improvement-proposal-process.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-001-burn-election.md b/sip/sip-001-burn-election.md deleted file mode 100644 index 19a4aca215c..00000000000 --- a/sip/sip-001-burn-election.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-001 Burn Election - -This document formerly contained SIP-001 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-001/sip-001-burn-election.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-002-smart-contract-language.md b/sip/sip-002-smart-contract-language.md deleted file mode 100644 index 26503048b5e..00000000000 --- a/sip/sip-002-smart-contract-language.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-002 Smart Contract Language - -This document formerly contained SIP-002 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-002/sip-002-smart-contract-language.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-003-peer-network.md b/sip/sip-003-peer-network.md deleted file mode 100644 index 84ae5dfd25c..00000000000 --- a/sip/sip-003-peer-network.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-003 Peer Network - -This document formerly contained SIP-003 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-003/sip-003-peer-network.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-004-materialized-view.md b/sip/sip-004-materialized-view.md deleted file mode 100644 index def065a1751..00000000000 --- a/sip/sip-004-materialized-view.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-004 Cryptographic Committment to Materialized Views - -This document formerly contained SIP-004 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-004/sip-004-materialized-view.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-005-blocks-and-transactions.md b/sip/sip-005-blocks-and-transactions.md deleted file mode 100644 index eda0f300045..00000000000 --- a/sip/sip-005-blocks-and-transactions.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-005 Blocks, Transactions, and Accounts - -This document formerly contained SIP-005 before the Stacks 2.0 mainnet launched. - -This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov). diff --git a/sip/sip-006-runtime-cost-assessment.md b/sip/sip-006-runtime-cost-assessment.md deleted file mode 100644 index 019e6173f01..00000000000 --- a/sip/sip-006-runtime-cost-assessment.md +++ /dev/null @@ -1,5 +0,0 @@ -# SIP-006 Clarity Execution Cost Assessment - -This document formerly contained SIP-006 before the Stacks 2.0 mainnet launched. 
-
-This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-006/sip-006-runtime-cost-assessment.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov).
diff --git a/sip/sip-007-stacking-consensus.md b/sip/sip-007-stacking-consensus.md
deleted file mode 100644
index 37afb07230c..00000000000
--- a/sip/sip-007-stacking-consensus.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# SIP-007 Stacking Consensus
-
-This document formerly contained SIP-007 before the Stacks 2.0 mainnet launched.
-
-This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-007/sip-007-stacking-consensus.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov).
diff --git a/sip/sip-008-analysis-cost-assessment.md b/sip/sip-008-analysis-cost-assessment.md
deleted file mode 100644
index 72813cdc085..00000000000
--- a/sip/sip-008-analysis-cost-assessment.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# SIP-008 Clarity Parsing and Analysis Cost Assessment
-
-This document formerly contained SIP-008 before the Stacks 2.0 mainnet launched.
-
-This SIP is now located in the [stacksgov/sips repository](https://github.com/stacksgov/sips/blob/main/sips/sip-008/sip-008-analysis-cost-assessment.md) as part of the [Stacks Community Governance organization](https://github.com/stacksgov).

From d2b31d9ca8a3f9d84d44cccf3efe59074297535a Mon Sep 17 00:00:00 2001
From: Roberto De Ioris
Date: Wed, 26 Mar 2025 20:17:37 +0100
Subject: [PATCH 213/238] fixed integration test for contract call fees

---
 testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 1ce6fc8bf15..077a55a95e9 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -815,10 +815,21 @@ impl MultipleMinerTest {
         function_args: &[clarity::vm::Value],
     ) -> String {
         let http_origin = self.node_http();
+        // build a fake tx for getting a rough amount of fee
+        let fake_contract_tx = make_contract_call(
+            &self.sender_sk,
+            sender_nonce,
+            100,
+            self.signer_test.running_nodes.conf.burnchain.chain_id,
+            &tests::to_addr(&self.sender_sk),
+            contract_name,
+            function_name,
+            function_args,
+        );
         let contract_tx = make_contract_call(
             &self.sender_sk,
             sender_nonce,
-            self.send_fee,
+            self.send_fee + fake_contract_tx.len() as u64,
             self.signer_test.running_nodes.conf.burnchain.chain_id,
             &tests::to_addr(&self.sender_sk),
             contract_name,

From 87548770232afb0be95fa20233e30e5a79f155a7 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 26 Mar 2025 15:25:51 -0400
Subject: [PATCH 214/238] chore: update copyright date

---
 stacks-common/src/util/lru_cache.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 77ecb64a985..022169305c5 100644
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -1,4 +1,4 @@
-// Copyright (C) 2024 Stacks Open Internet Foundation
+// Copyright (C) 2025 Stacks Open Internet Foundation
 //
 // This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by

From af945f2a8495af20eeadff28bd06d2793f868a9f Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Wed, 26 Mar 2025 15:41:31 -0400
Subject: [PATCH 215/238] fix:
merge error --- stacks-common/src/util/lru_cache.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 672cc640be9..24b780647ce 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -62,8 +62,12 @@ impl Display for LruCache { )?; let mut curr = self.head; while curr != self.capacity { - writeln!(f, " {}", self.order[curr])?; - curr = self.order[curr].next; + let Some(node) = self.order.get(curr) else { + writeln!(f, " ")?; + break; + }; + writeln!(f, " {}", node)?; + curr = node.next; } Ok(()) } From 39610a886ae1f47bac255cdc97bc92b52a6481ac Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 26 Mar 2025 16:11:04 -0400 Subject: [PATCH 216/238] fix: merge error --- testnet/stacks-node/src/tests/signer/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a27b2e11281..4f9ee34f37c 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -32,6 +32,7 @@ use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; use stacks::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; +use stacks::core::test_util::{make_contract_call, make_contract_publish, make_stacks_transfer}; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -54,7 +55,6 @@ use super::nakamoto_integrations::{ check_nakamoto_empty_block_heuristics, next_block_and, wait_for, }; use super::neon_integrations::{get_account, get_sortition_info_ch, submit_tx_fallible}; -use super::{make_contract_call, make_contract_publish, make_stacks_transfer}; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; From 7b56ed0591778714a1d47d0c6dc8a9fac7fd9167 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 26 Mar 2025 14:13:42 -0700 Subject: [PATCH 217/238] Fix version mismatch, send updates when state is Initialized, and add a test --- libsigner/src/v0/messages.rs | 6 + stacks-signer/src/v0/signer.rs | 22 +- stacks-signer/src/v0/signer_state.rs | 55 +++- testnet/stacks-node/src/tests/epoch_205.rs | 5 +- .../src/tests/neon_integrations.rs | 70 +---- testnet/stacks-node/src/tests/signer/mod.rs | 7 + testnet/stacks-node/src/tests/signer/v0.rs | 287 ++++++++++++++---- 7 files changed, 295 insertions(+), 157 deletions(-) diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index a27da8058d5..5438eef6d38 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -1705,6 +1705,12 @@ impl From for SignerMessage { } } +impl From for SignerMessage { + fn from(update: StateMachineUpdate) -> Self { + Self::StateMachineUpdate(update) + } +} + #[cfg(test)] mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 97625538f38..97b35693c65 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -129,7 +129,7 @@ impl std::fmt::Display for Signer { impl SignerTrait for Signer { /// Create a new signer from the given configuration fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self { - let stackerdb = 
StackerDB::from(&signer_config); + let mut stackerdb = StackerDB::from(&signer_config); let mode = match signer_config.signer_mode { SignerConfigMode::DryRun => SignerMode::DryRun, SignerConfigMode::Normal { signer_id, .. } => SignerMode::Normal { signer_id }, @@ -141,11 +141,12 @@ impl SignerTrait for Signer { SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); - let signer_state = LocalStateMachine::new(&signer_db, stacks_client, &proposal_config) - .unwrap_or_else(|e| { - warn!("Failed to initialize local state machine for signer: {e:?}"); - LocalStateMachine::Uninitialized - }); + let signer_state = + LocalStateMachine::new(&signer_db, &mut stackerdb, stacks_client, &proposal_config) + .unwrap_or_else(|e| { + warn!("Failed to initialize local state machine for signer: {e:?}"); + LocalStateMachine::Uninitialized + }); Self { private_key: signer_config.stacks_private_key, stackerdb, @@ -213,7 +214,7 @@ impl SignerTrait for Signer { } if self.reward_cycle <= current_reward_cycle { - self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config) + self.local_state_machine.handle_pending_update(&self.signer_db, &mut self.stackerdb, stacks_client, &self.proposal_config) .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e)); } @@ -299,6 +300,9 @@ impl SignerTrait for Signer { self.mock_sign(mock_proposal.clone()); } } + SignerMessage::StateMachineUpdate(_update) => { + // TODO: should make note of this update view point to determine if there is an agreed upon global state + } _ => {} } } @@ -330,7 +334,7 @@ impl SignerTrait for Signer { panic!("{self} Failed to write burn block event to signerdb: {e}"); }); self.local_state_machine - .bitcoin_block_arrival(&self.signer_db, stacks_client, &self.proposal_config, Some(*burn_height)) + .bitcoin_block_arrival(&self.signer_db, &mut self.stackerdb, stacks_client, &self.proposal_config, Some(*burn_height)) .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest bitcoin block arrival"; "err" => ?e)); *sortition_state = None; } @@ -352,7 +356,7 @@ impl SignerTrait for Signer { "block_height" => block_height ); self.local_state_machine - .stacks_block_arrival(consensus_hash, *block_height, block_id) + .stacks_block_arrival(&mut self.stackerdb, consensus_hash, *block_height, block_id) .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest stacks block arrival"; "err" => ?e)); if let Ok(Some(mut block_info)) = self diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs index 720a59dab5f..f5f64cd0a01 100644 --- a/stacks-signer/src/v0/signer_state.rs +++ b/stacks-signer/src/v0/signer_state.rs @@ -18,8 +18,8 @@ use std::time::{Duration, UNIX_EPOCH}; use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use libsigner::v0::messages::{ - StateMachineUpdate as StateMachineUpdateMessage, StateMachineUpdateContent, - StateMachineUpdateMinerState, + MessageSlotID, SignerMessage, StateMachineUpdate as StateMachineUpdateMessage, + StateMachineUpdateContent, StateMachineUpdateMinerState, }; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; @@ -32,11 +32,11 @@ use stacks_common::{info, warn}; use crate::chainstate::{ ProposalEvalConfig, SignerChainstateError, SortitionState, 
SortitionsView, }; -use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; +use crate::client::{ClientError, CurrentAndLastSortition, StackerDB, StacksClient}; use crate::signerdb::SignerDb; /// This is the latest supported protocol version for this signer binary -pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 1; +pub static SUPPORTED_SIGNER_PROTOCOL_VERSION: u64 = 0; /// A signer state machine view. This struct can /// be used to encode the local signer's view or @@ -144,11 +144,12 @@ impl LocalStateMachine { /// and signerdb for the current sortition information pub fn new( db: &SignerDb, + stackerdb: &mut StackerDB, client: &StacksClient, proposal_config: &ProposalEvalConfig, ) -> Result { let mut instance = Self::Uninitialized; - instance.bitcoin_block_arrival(db, client, proposal_config, None)?; + instance.bitcoin_block_arrival(db, stackerdb, client, proposal_config, None)?; Ok(instance) } @@ -158,7 +159,22 @@ impl LocalStateMachine { burn_block: ConsensusHash::empty(), burn_block_height: 0, current_miner: MinerState::NoValidMiner, - active_signer_protocol_version: 1, + active_signer_protocol_version: SUPPORTED_SIGNER_PROTOCOL_VERSION, + } + } + + /// Send the local tate machine as an signer update message to stackerdb + pub fn send_signer_update_message(&self, stackerdb: &mut StackerDB) { + let update: Result = self.try_into(); + match update { + Ok(update) => { + if let Err(e) = stackerdb.send_message_with_retry::(update.into()) { + warn!("Failed to send signer update to stacker-db: {e:?}",); + } + } + Err(e) => { + warn!("Failed to convert local signer state to a signer message: {e:?}"); + } } } @@ -166,16 +182,21 @@ impl LocalStateMachine { pub fn handle_pending_update( &mut self, db: &SignerDb, + stackerdb: &mut StackerDB, client: &StacksClient, proposal_config: &ProposalEvalConfig, ) -> Result<(), SignerChainstateError> { let LocalStateMachine::Pending { update, .. } = self else { - return self.check_miner_inactivity(db, client, proposal_config); + return self.check_miner_inactivity(db, stackerdb, client, proposal_config); }; match update.clone() { - StateMachineUpdate::BurnBlock(expected_burn_height) => { - self.bitcoin_block_arrival(db, client, proposal_config, Some(expected_burn_height)) - } + StateMachineUpdate::BurnBlock(expected_burn_height) => self.bitcoin_block_arrival( + db, + stackerdb, + client, + proposal_config, + Some(expected_burn_height), + ), } } @@ -216,6 +237,7 @@ impl LocalStateMachine { fn check_miner_inactivity( &mut self, db: &SignerDb, + stackerdb: &mut StackerDB, client: &StacksClient, proposal_config: &ProposalEvalConfig, ) -> Result<(), SignerChainstateError> { @@ -256,7 +278,8 @@ impl LocalStateMachine { "inactive_tenure_ch" => %inactive_tenure_ch, "new_active_tenure_ch" => %new_active_tenure_ch ); - + // We have updated our state, so let other signers know. + self.send_signer_update_message(stackerdb); Ok(()) } else { warn!("Current miner timed out due to inactivity, but prior miner is not valid. Allowing current miner to continue"); @@ -324,6 +347,7 @@ impl LocalStateMachine { /// Handle a new stacks block arrival pub fn stacks_block_arrival( &mut self, + stackerdb: &mut StackerDB, ch: &ConsensusHash, height: u64, block_id: &StacksBlockId, @@ -379,6 +403,8 @@ impl LocalStateMachine { *parent_tenure_last_block = *block_id; *parent_tenure_last_block_height = height; *self = LocalStateMachine::Initialized(prior_state_machine); + // We updated the block id and/or the height. 
Let other signers know our view has changed + self.send_signer_update_message(stackerdb); Ok(()) } @@ -426,6 +452,7 @@ impl LocalStateMachine { pub fn bitcoin_block_arrival( &mut self, db: &SignerDb, + stackerdb: &mut StackerDB, client: &StacksClient, proposal_config: &ProposalEvalConfig, mut expected_burn_height: Option, @@ -433,7 +460,7 @@ impl LocalStateMachine { // set self to uninitialized so that if this function errors, // self is left as uninitialized. let prior_state = std::mem::replace(self, Self::Uninitialized); - let prior_state_machine = match prior_state { + let prior_state_machine = match prior_state.clone() { // if the local state machine was uninitialized, just initialize it LocalStateMachine::Uninitialized => Self::place_holder(), LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine, @@ -512,6 +539,10 @@ impl LocalStateMachine { active_signer_protocol_version: prior_state_machine.active_signer_protocol_version, }); + if prior_state != *self { + self.send_signer_update_message(stackerdb); + } + Ok(()) } } diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 1e602a4a7df..688f8f0a70d 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::sync::atomic::Ordering; use std::{env, thread}; use clarity::vm::costs::ExecutionCost; @@ -21,14 +20,12 @@ use stacks::core::{ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; -use stacks_common::util::sleep_ms; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::neon_integrations::*; use crate::tests::{ make_contract_call, make_contract_call_mblock_only, make_contract_publish, - make_contract_publish_microblock_only, run_until_burnchain_height, select_transactions_where, - to_addr, + run_until_burnchain_height, select_transactions_where, to_addr, }; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 5111393dd8c..620943893ce 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -11,7 +11,6 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::PrincipalData; use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH}; -use rand::Rng; use rusqlite::params; use serde::Deserialize; use serde_json::json; @@ -30,12 +29,11 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::POX_4_NAME; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ - signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, - TransactionSuccessEvent, + TransactionErrorEvent, TransactionEvent, TransactionSuccessEvent, }; use stacks::chainstate::stacks::{ - StacksBlock, StacksBlockHeader, StacksMicroblock, StacksMicroblockHeader, StacksPrivateKey, - StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, + StacksBlock, StacksBlockHeader, StacksMicroblock, StacksPrivateKey, StacksPublicKey, + StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as 
execute; use stacks::cli; @@ -90,8 +88,6 @@ use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; use crate::tests::gen_random_port; use crate::tests::nakamoto_integrations::{get_key_for_cycle, wait_for}; -use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; -use crate::util::secp256k1::MessageSignature; use crate::{neon, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAddress) { @@ -3316,34 +3312,6 @@ fn should_fix_2771() { channel.stop_chains_coordinator(); } -/// Returns a StacksMicroblock with the given transactions, sequence, and parent block that is -/// signed with the given private key. -fn make_signed_microblock( - block_privk: &StacksPrivateKey, - txs: Vec, - parent_block: BlockHeaderHash, - seq: u16, -) -> StacksMicroblock { - let mut rng = rand::thread_rng(); - - let txid_vecs: Vec<_> = txs.iter().map(|tx| tx.txid().as_bytes().to_vec()).collect(); - let merkle_tree = MerkleTree::::new(&txid_vecs); - let tx_merkle_root = merkle_tree.root(); - - let mut mblock = StacksMicroblock { - header: StacksMicroblockHeader { - version: rng.gen(), - sequence: seq, - prev_block: parent_block, - tx_merkle_root, - signature: MessageSignature([0u8; 65]), - }, - txs, - }; - mblock.sign(block_privk).unwrap(); - mblock -} - #[test] #[ignore] fn filter_low_fee_tx_integration_test() { @@ -8614,38 +8582,6 @@ pub fn make_random_tx_chain( chain } -fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(privk); - let mut chain = vec![]; - - for nonce in 0..25 { - // N.B. private keys are 32-33 bytes, so this is always safe - let random_iters = privk.to_bytes()[nonce as usize] as usize; - - let be_bytes = [ - privk.to_bytes()[nonce as usize], - privk.to_bytes()[(nonce + 1) as usize], - ]; - - let random_extra_fee = u16::from_be_bytes(be_bytes) as u64; - - let mut addr_prefix = addr.to_string(); - let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); - eprintln!("Make tx {contract_name}"); - let tx = make_contract_publish_microblock_only( - privk, - nonce, - 1049230 + nonce + fee_plus + random_extra_fee, - chain_id, - &contract_name, - &make_runtime_sized_contract(1, nonce, &addr_prefix), - ); - chain.push(tx); - } - chain -} - fn test_competing_miners_build_on_same_chain( num_miners: usize, conf_template: Config, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a27b2e11281..91c5717e0f7 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -1220,6 +1220,13 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest(accepted.into()) .expect("Failed to send accept signature"); } + + pub fn signer_public_keys(&self) -> Vec { + self.signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect() + } } fn setup_stx_btc_node( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 88abdbca33f..32ab3a3a676 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -24,7 +24,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{ BlockAccepted, BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, PeerInfo, RejectCode, - RejectReason, SignerMessage, + RejectReason, SignerMessage, 
StateMachineUpdateContent, StateMachineUpdateMinerState,
 };
 use libsigner::{
     BlockProposal, BlockProposalData, SignerSession, StackerDBSession, VERSION_STRING,
@@ -33,6 +33,7 @@ use stacks::address::AddressHashMode;
 use stacks::burnchains::Txid;
 use stacks::chainstate::burn::db::sortdb::SortitionDB;
 use stacks::chainstate::burn::operations::LeaderBlockCommitOp;
+use stacks::chainstate::burn::ConsensusHash;
 use stacks::chainstate::coordinator::comm::CoordinatorChannels;
 use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState};
 use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions};
@@ -1261,6 +1262,65 @@ pub fn wait_for_block_rejections_from_signers(
     Ok(result)
 }
 
+/// Waits for all of the provided signers to send an update for a block with the specified burn block height and parent tenure stacks block height
+pub fn wait_for_state_machine_update(
+    timeout_secs: u64,
+    expected_burn_block: &ConsensusHash,
+    expected_burn_block_height: u64,
+    expected_miner_info: Option<(Hash160, u64)>,
+) -> Result<(), String> {
+    wait_for(timeout_secs, || {
+        let stackerdb_events = test_observer::get_stackerdb_chunks();
+        for chunk in stackerdb_events
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+        {
+            let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                .expect("Failed to deserialize SignerMessage");
+            let SignerMessage::StateMachineUpdate(update) = message else {
+                continue;
+            };
+            let StateMachineUpdateContent::V0 {
+                burn_block,
+                burn_block_height,
+                current_miner,
+            } = &update.content;
+            if *burn_block_height != expected_burn_block_height || burn_block != expected_burn_block
+            {
+                continue;
+            }
+            match current_miner {
+                StateMachineUpdateMinerState::ActiveMiner {
+                    current_miner_pkh,
+                    parent_tenure_last_block_height,
+                    ..
+ } => { + if let Some(( + expected_miner_pkh, + expected_miner_parent_tenure_last_block_height, + )) = expected_miner_info + { + if expected_miner_pkh != *current_miner_pkh + || expected_miner_parent_tenure_last_block_height + != *parent_tenure_last_block_height + { + continue; + } + } + } + StateMachineUpdateMinerState::NoValidMiner => { + if expected_miner_info.is_some() { + continue; + }; + } + } + // We only need one update to match our conditions + return Ok(true); + } + Ok(false) + }) +} + #[test] #[ignore] /// Test that a signer can respond to an invalid block proposal @@ -5921,11 +5981,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { None, None, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_public_keys(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); @@ -6095,11 +6151,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { None, None, ); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_public_keys(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); @@ -6319,11 +6371,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Submitted tx {tx} in to attempt to mine block N+1"); let block_n_1 = wait_for_block_proposal(30, info_before.stacks_tip_height + 1, &miner_pk) .expect("Timed out waiting for block N+1 to be proposed"); - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_public_keys(); wait_for_block_global_acceptance_from_signers( 30, &block_n_1.header.signer_signature_hash(), @@ -6480,12 +6528,7 @@ fn continue_after_fast_block_no_sortition() { let burnchain = conf_1.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); - let all_signers = miners - .signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = miners.signer_test.signer_public_keys(); let get_burn_height = || { SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6773,11 +6816,7 @@ fn signing_in_0th_tenure_of_reward_cycle() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let signer_public_keys = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let signer_public_keys = signer_test.signer_public_keys(); let long_timeout = Duration::from_secs(200); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); @@ -7045,11 +7084,7 @@ fn block_commit_delay() { .expect("Timed out waiting for block commit after new Stacks block"); // Prevent a block from being mined by making signers reject it. 
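+    // (Test-only control: every signer whose public key is passed to
+    // TEST_REJECT_ALL_BLOCK_PROPOSAL rejects any block proposal it receives,
+    // so no block can gather enough signatures to be approved.)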
- let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + let all_signers = signer_test.signer_public_keys(); TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers); info!("------------------------- Test Mine Burn Block -------------------------"); @@ -7315,11 +7350,7 @@ fn block_validation_check_rejection_timeout_heuristic() { ); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_public_keys(); signer_test.boot_to_epoch_3(); @@ -7537,11 +7568,7 @@ fn block_validation_pending_table() { .expect("Timed out waiting for pending block validation to be removed"); // for test cleanup we need to wait for block rejections - let signer_keys = signer_test - .signer_configs - .iter() - .map(|c| StacksPublicKey::from_private(&c.stacks_private_key)) - .collect::>(); + let signer_keys = signer_test.signer_public_keys(); wait_for_block_rejections_from_signers(30, &block.header.signer_signature_hash(), &signer_keys) .expect("Timed out waiting for block rejections"); @@ -8134,7 +8161,6 @@ fn block_proposal_max_age_rejections() { let short_timeout = Duration::from_secs(30); info!("------------------------- Send Block Proposal To Signers -------------------------"); - let info_before = get_chain_info(&signer_test.running_nodes.conf); let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), txs: vec![], @@ -8268,11 +8294,7 @@ fn global_acceptance_depends_on_block_announcement() { None, ); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_public_keys(); let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -8775,11 +8797,7 @@ fn incoming_signers_ignore_block_proposals() { info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_1} -------------------------"); let short_timeout = Duration::from_secs(30); - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = signer_test.signer_public_keys(); test_observer::clear(); // Propose a block to the signers that passes initial checks but will be rejected by the stacks node @@ -10576,12 +10594,7 @@ fn interrupt_miner_on_new_stacks_tip() { let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys(); let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes(); - let all_signers: Vec<_> = miners - .signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); + let all_signers = miners.signer_test.signer_public_keys(); // Pause Miner 2's commits to ensure Miner 1 wins the first sortition. 
 skip_commit_op_rl2.set(true);
@@ -11806,12 +11819,7 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
             config.miner.block_commit_delay = Duration::from_secs(0);
         },
     );
-    let all_signers = miners
-        .signer_test
-        .signer_stacks_private_keys
-        .iter()
-        .map(StacksPublicKey::from_private)
-        .collect::<Vec<_>>();
+    let all_signers = miners.signer_test.signer_public_keys();
     let mut approving_signers = vec![];
     let mut rejecting_signers = vec![];
     for (i, signer_config) in miners.signer_test.signer_configs.iter().enumerate() {
@@ -12053,6 +12061,8 @@ fn transfers_in_block(block: &serde_json::Value) -> usize {
 #[ignore]
 /// This test verifies that a miner will re-propose the same block if it times
 /// out waiting for signers to reach consensus on the block.
+///
+/// Spins
 fn retry_proposal() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
@@ -12309,3 +12319,150 @@ fn signer_can_accept_rejected_block() {
 
     signer_test.shutdown();
 }
+
+#[test]
+#[ignore]
+/// This test verifies that a signer will send update messages to stackerdb when it updates its internal state
+///
+/// For a new bitcoin block arrival, the signers send a local state update message with this updated block and miner
+/// For an inactive miner, the signer sends a local state update message indicating it is reverting to the prior miner
+fn signers_send_state_message_updates() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+
+    // We want the miner to be marked as inactive so signers will send an update message indicating it.
+    // Therefore, set the block proposal timeout to something small enough to force a winning miner to timeout.
+    let block_proposal_timeout = Duration::from_secs(20);
+    let tenure_extend_wait_timeout = block_proposal_timeout;
+    let mut miners = MultipleMinerTest::new_with_config_modifications(
+        num_signers,
+        0,
+        |signer_config| {
+            signer_config.block_proposal_timeout = block_proposal_timeout;
+        },
+        |config| {
+            config.miner.tenure_extend_wait_timeout = tenure_extend_wait_timeout;
+            config.miner.block_commit_delay = Duration::from_secs(0);
+        },
+        |config| {
+            config.miner.block_commit_delay = Duration::from_secs(0);
+        },
+    );
+
+    let rl1_skip_commit_op = miners
+        .signer_test
+        .running_nodes
+        .counters
+        .naka_skip_commit_op
+        .clone();
+    let rl2_skip_commit_op = miners.rl2_counters.naka_skip_commit_op.clone();
+
+    let (conf_1, _) = miners.get_node_configs();
+    let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes();
+    let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
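+    // (While its block commits are skipped, Miner 2 submits no commit for the
+    // upcoming sortition, so Miner 1 is the only possible winner.)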
+ rl2_skip_commit_op.set(true); + + miners.boot_to_epoch_3(); + + let burnchain = conf_1.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let get_burn_consensus_hash = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .consensus_hash + }; + let starting_peer_height = get_chain_info(&conf_1).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + rl1_skip_commit_op.set(true); + + info!("------------------------- Miner 1 Tenure Starts and Mines Block N-------------------------"); + miners + .mine_bitcoin_block_and_tenure_change_tx(&sortdb, TenureChangeCause::BlockFound, 60) + .expect("Failed to mine BTC block followed by tenure change tx."); + btc_blocks_mined += 1; + + verify_sortition_winner(&sortdb, &miner_pkh_1); + + info!("------------------------- Confirm Miner 1 is the Active Miner in Update -------------------------"); + // Verify that signers first sent a bitcoin block update + wait_for_state_machine_update( + 60, + &get_burn_consensus_hash(), + starting_burn_height + 1, + Some((miner_pkh_1, starting_peer_height)), + ) + .expect("Timed out waiting for signers to send a state update"); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + test_observer::clear(); + miners.submit_commit_miner_2(&sortdb); + + // Pause the block proposal broadcast so that miner 2 will be unable to broadcast its + // tenure change proposal BEFORE the block_proposal_timeout and will be marked invalid. 
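+    // (TEST_BROADCAST_PROPOSAL_STALL holds back block proposal broadcasts from
+    // every miner whose key is in the list; shrinking the list later releases
+    // that miner's pending proposals.)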
+ // Also pause miner 1's blocks so we don't go extending that tenure either + TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1, miner_pk_2]); + + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"); + miners + .mine_bitcoin_blocks_and_confirm(&sortdb, 1, 60) + .expect("Timed out waiting for BTC block"); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner 2 won + verify_sortition_winner(&sortdb, &miner_pkh_2); + + info!("------------------------- Confirm Miner 2 is the Active Miner -------------------------{}, {}, {miner_pkh_2}", starting_burn_height + 2, starting_peer_height); + // We cannot confirm the height cause some signers may or may not be aware of the delayed stacks block + wait_for_state_machine_update( + 60, + &get_burn_consensus_hash(), + starting_burn_height + 2, + Some((miner_pkh_2, starting_peer_height + 1)), + ) + .expect("Timed out waiting for signers to send their state update"); + + test_observer::clear(); + info!( + "------------------------- Wait for Miner 2 to be Marked Invalid -------------------------" + ); + // Make sure that miner 2 gets marked invalid by not proposing a block BEFORE block_proposal_timeout + std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); + // Allow miner 2 to propose its late block and see the signer get marked malicious + TEST_BROADCAST_PROPOSAL_STALL.set(vec![miner_pk_1]); + + info!("------------------------- Confirm Miner 1 is the Active Miner Again -------------------------"); + wait_for_state_machine_update( + 60, + &get_burn_consensus_hash(), + starting_burn_height + 2, + Some((miner_pkh_1, starting_peer_height)), + ) + .expect("Timed out waiting for signers to send their state update"); + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + assert_eq!( + miners.get_peer_stacks_tip_height(), + starting_peer_height + 1 + ); + miners.shutdown(); +} From 30f5596c191be03a23843f41d1f4211baf56afed Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 27 Mar 2025 06:52:14 +0100 Subject: [PATCH 218/238] improved fee computation --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 077a55a95e9..ce4acae84c3 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -829,7 +829,7 @@ impl MultipleMinerTest { let contract_tx = make_contract_call( &self.sender_sk, sender_nonce, - self.send_fee + fake_contract_tx.len() as u64, + fake_contract_tx.len() as u64, self.signer_test.running_nodes.conf.burnchain.chain_id, &tests::to_addr(&self.sender_sk), contract_name, From efb494bc1f53999f16429b9f03aeb7557ca815d4 Mon Sep 17 00:00:00 2001 From: Federico De Felici Date: Thu, 27 Mar 2025 09:07:38 +0100 Subject: [PATCH 219/238] chore: fix spelling error in comments, #4613 --- .gitattributes | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitattributes b/.gitattributes index 1114324eb0a..0a0c5f7964c 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,6 +1,6 @@ legacy/* linguist-vendored # Enforcing 'lf' eol mainly for: # - 'stx-genesis' package, where txt files need hash computation and comparison -# - 'clarity' package, where clariy language is sentitive to line endings for .clar files +# - 'clarity' package, where 
clariy language is sensitive to line endings for .clar files # anyhow, setting eol for all text files to have a homogeneous management over the whole code base * text eol=lf From f71e0ecade290b0a21f73b5ed422df1343fff341 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 27 Mar 2025 09:47:13 -0400 Subject: [PATCH 220/238] feat: use a proper error type in `LruCache` --- stacks-common/src/util/lru_cache.rs | 51 ++++++++++++++++++----------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 24b780647ce..18524407bdf 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -40,6 +40,17 @@ impl Display for Node { } } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct LruCacheCorrupted; + +impl std::fmt::Display for LruCacheCorrupted { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "LRU cache is in a corrupted state") + } +} + +impl std::error::Error for LruCacheCorrupted {} + /// LRU cache pub struct LruCache { capacity: usize, @@ -92,10 +103,10 @@ impl LruCache { /// Get the value for the given key /// Returns an error iff the cache is corrupted and should be discarded - pub fn get(&mut self, key: &K) -> Result, ()> { + pub fn get(&mut self, key: &K) -> Result, LruCacheCorrupted> { if let Some(&index) = self.cache.get(key) { self.move_to_head(index)?; - let node = self.order.get(index).ok_or(())?; + let node = self.order.get(index).ok_or(LruCacheCorrupted)?; Ok(Some(node.value)) } else { Ok(None) @@ -105,14 +116,14 @@ impl LruCache { /// Insert a key-value pair into the cache, marking it as dirty. /// Returns an error iff the cache is corrupted and should be discarded /// Returns `Ok(Some((K, V)))` if a dirty value was evicted. - pub fn insert(&mut self, key: K, value: V) -> Result, ()> { + pub fn insert(&mut self, key: K, value: V) -> Result, LruCacheCorrupted> { self.insert_with_dirty(key, value, true) } /// Insert a key-value pair into the cache, marking it as clean. /// Returns an error iff the cache is corrupted and should be discarded /// Returns `Ok(Some((K, V)))` if a dirty value was evicted. - pub fn insert_clean(&mut self, key: K, value: V) -> Result, ()> { + pub fn insert_clean(&mut self, key: K, value: V) -> Result, LruCacheCorrupted> { self.insert_with_dirty(key, value, false) } @@ -124,10 +135,10 @@ impl LruCache { key: K, value: V, dirty: bool, - ) -> Result, ()> { + ) -> Result, LruCacheCorrupted> { if let Some(&index) = self.cache.get(&key) { // Update an existing node - let node = self.order.get_mut(index).ok_or(())?; + let node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?; node.value = value; node.dirty = dirty; self.move_to_head(index)?; @@ -139,7 +150,7 @@ impl LruCache { // We've reached capacity. 
Evict the least-recently used value // and reuse its node let index = self.evict_lru()?; - let tail_node = self.order.get_mut(index).ok_or(())?; + let tail_node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?; // Replace the key with the new key, saving the old key let replaced_key = std::mem::replace(&mut tail_node.key, key.clone()); @@ -186,7 +197,7 @@ impl LruCache { pub fn flush( &mut self, mut f: impl FnMut(&K, V) -> Result<(), E>, - ) -> Result, ()> { + ) -> Result, LruCacheCorrupted> { let mut current = self.head; // Keep track of visited nodes to detect cycles @@ -195,10 +206,10 @@ impl LruCache { while current != self.capacity { // Detect cycles if !visited.insert(current) { - return Err(()); + return Err(LruCacheCorrupted); } - let node = self.order.get_mut(current).ok_or(())?; + let node = self.order.get_mut(current).ok_or(LruCacheCorrupted)?; let next = node.next; if node.dirty { let value = node.value; @@ -216,8 +227,8 @@ impl LruCache { } /// Helper function to remove a node from the linked list (by index) - fn detach_node(&mut self, index: usize) -> Result<(), ()> { - let node = self.order.get(index).ok_or(())?; + fn detach_node(&mut self, index: usize) -> Result<(), LruCacheCorrupted> { + let node = self.order.get(index).ok_or(LruCacheCorrupted)?; let prev = node.prev; let next = node.next; @@ -226,7 +237,7 @@ impl LruCache { self.tail = prev; } else { // Else, update the next node to point to the previous node - let next_node = self.order.get_mut(next).ok_or(())?; + let next_node = self.order.get_mut(next).ok_or(LruCacheCorrupted)?; next_node.prev = prev; } @@ -235,7 +246,7 @@ impl LruCache { self.head = next; } else { // Else, update the previous node to point to the next node - let prev_node = self.order.get_mut(prev).ok_or(())?; + let prev_node = self.order.get_mut(prev).ok_or(LruCacheCorrupted)?; prev_node.next = next; } @@ -243,14 +254,14 @@ impl LruCache { } /// Helper function to attach a node as the head of the linked list - fn attach_as_head(&mut self, index: usize) -> Result<(), ()> { - let node = self.order.get_mut(index).ok_or(())?; + fn attach_as_head(&mut self, index: usize) -> Result<(), LruCacheCorrupted> { + let node = self.order.get_mut(index).ok_or(LruCacheCorrupted)?; node.prev = self.capacity; node.next = self.head; if self.head != self.capacity { // If there is a head, update its previous pointer to this one - let head_node = self.order.get_mut(self.head).ok_or(())?; + let head_node = self.order.get_mut(self.head).ok_or(LruCacheCorrupted)?; head_node.prev = index; } else { // Else, the list was empty, so update the tail @@ -261,7 +272,7 @@ impl LruCache { } /// Helper function to move a node to the head of the linked list - fn move_to_head(&mut self, index: usize) -> Result<(), ()> { + fn move_to_head(&mut self, index: usize) -> Result<(), LruCacheCorrupted> { if index == self.head { // If the node is already the head, do nothing return Ok(()); @@ -274,14 +285,14 @@ impl LruCache { /// Helper function to evict the least-recently used node, which is the /// tail of the linked list /// Returns the index of the evicted node - fn evict_lru(&mut self) -> Result { + fn evict_lru(&mut self) -> Result { let index = self.tail; if index == self.capacity { // If the list is empty, do nothing return Ok(self.capacity); } self.detach_node(index)?; - let node = self.order.get(index).ok_or(())?; + let node = self.order.get(index).ok_or(LruCacheCorrupted)?; self.cache.remove(&node.key); Ok(index) } From f26d12526551b4b1f09f44fab1a08291f8cd9165 Mon Sep 17 
00:00:00 2001
From: Federico De Felici
Date: Thu, 27 Mar 2025 16:02:23 +0100
Subject: [PATCH 221/238] chore: fix misspelled clarity word in comment, #4613

Co-authored-by: wileyj <2847772+wileyj@users.noreply.github.com>
---
 .gitattributes | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitattributes b/.gitattributes
index 0a0c5f7964c..e4582d8ca72 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,6 +1,6 @@
 legacy/* linguist-vendored
 # Enforcing 'lf' eol mainly for:
 # - 'stx-genesis' package, where txt files need hash computation and comparison
-# - 'clarity' package, where clariy language is sensitive to line endings for .clar files
+# - 'clarity' package, where clarity language is sensitive to line endings for .clar files
 # anyhow, setting eol for all text files to have a homogeneous management over the whole code base
 * text eol=lf

From f4cd7260e28ed91d08157b51b8c6b2ed120835bf Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Thu, 27 Mar 2025 13:12:04 -0700
Subject: [PATCH 222/238] CRC: move StateMachineUpdate TODO message to
 SignerMessages instead of MinerMessages

---
 stacks-signer/src/v0/signer.rs | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 97625538f38..6c26eeb9f0a 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -234,10 +234,17 @@ impl SignerTrait for Signer {
             );
             // try and gather signatures
             for message in messages {
-                let SignerMessage::BlockResponse(block_response) = message else {
-                    continue;
-                };
-                self.handle_block_response(stacks_client, block_response, sortition_state);
+                match message {
+                    SignerMessage::BlockResponse(block_response) => self.handle_block_response(
+                        stacks_client,
+                        block_response,
+                        sortition_state,
+                    ),
+                    SignerMessage::StateMachineUpdate(_update) => {
+                        // TODO: should make note of this update view point to determine if there is an agreed upon global state
+                    }
+                    _ => {}
+                }
             }
         }
         SignerEvent::MinerMessages(messages) => {
@@ -300,9 +307,6 @@ impl SignerTrait for Signer {
                         self.mock_sign(mock_proposal.clone());
                     }
                 }
-                SignerMessage::StateMachineUpdate(_update) => {
-                    // TODO: should make note of this update view point to determine if there is an agreed upon global state
-                }
                 _ => {}
             }
         }

From 270437c4ec48ff7becb2c5a34dc2d7a869770324 Mon Sep 17 00:00:00 2001
From: Brice Dobry
Date: Fri, 28 Mar 2025 15:28:35 -0400
Subject: [PATCH 223/238] refactor: simplify `LruCache::flush`

---
 stacks-common/src/util/lru_cache.rs | 37 +++++++++--------------------
 1 file changed, 11 insertions(+), 26 deletions(-)

diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs
index 18524407bdf..608b99611a5 100644
--- a/stacks-common/src/util/lru_cache.rs
+++ b/stacks-common/src/util/lru_cache.rs
@@ -15,7 +15,7 @@
 
 use std::fmt::Display;
 
-use hashbrown::{HashMap, HashSet};
+use hashbrown::HashMap;
 
 /// Node in the doubly linked list
 struct Node {
@@ -198,30 +198,11 @@ impl LruCache {
         &mut self,
         mut f: impl FnMut(&K, V) -> Result<(), E>,
     ) -> Result<Result<(), E>, LruCacheCorrupted> {
-        let mut current = self.head;
-
-        // Keep track of visited nodes to detect cycles
-        let mut visited = HashSet::new();
-
-        while current != self.capacity {
-            // Detect cycles
-            if !visited.insert(current) {
-                return Err(LruCacheCorrupted);
-            }
-
-            let node = self.order.get_mut(current).ok_or(LruCacheCorrupted)?;
-            let next = node.next;
-            if node.dirty {
-                let value = node.value;
-
-                // Call the flush function
-                match f(&node.key,
value) { - Ok(()) => node.dirty = false, - Err(e) => return Ok(Err(e)), - } - node.dirty = false; + for node in self.order.iter_mut().filter(|n| n.dirty) { + match f(&node.key, node.value) { + Ok(()) => node.dirty = false, + Err(e) => return Ok(Err(e)), } - current = next; } Ok(Ok(())) } @@ -374,7 +355,8 @@ mod tests { .expect("cache corrupted") .expect("flush failed"); - assert_eq!(flushed, vec![(2, 2), (1, 3)]); + flushed.sort(); + assert_eq!(flushed, vec![(1, 3), (2, 2)]); } #[test] @@ -407,7 +389,8 @@ mod tests { .expect("cache corrupted") .expect("flush failed"); - assert_eq!(flushed, [(3, 3), (2, 2)]); + flushed.sort(); + assert_eq!(flushed, [(2, 2), (3, 3)]); } /// Simple LRU implementation for testing @@ -573,6 +556,8 @@ mod property_tests { simple_flushed.push((*k, v)); Ok::<(), ()>(()) }).unwrap(); + flushed.sort(); + simple_flushed.sort(); prop_assert_eq!(flushed, simple_flushed); } }; From 1742f8fce2eb54f9b7fafb6dde5ac514ebec8569 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Mar 2025 15:34:12 -0400 Subject: [PATCH 224/238] test: add `LruCache` tests with capacity == 1 --- stacks-common/src/util/lru_cache.rs | 68 +++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 608b99611a5..749234163fb 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -393,6 +393,74 @@ mod tests { assert_eq!(flushed, [(2, 2), (3, 3)]); } + #[test] + fn test_lru_cache_capacity_one() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), Some(1)); + + cache.insert(2, 2).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), None); + assert_eq!(cache.get(&2).expect("cache corrupted"), Some(2)); + } + + #[test] + fn test_lru_cache_capacity_one_update() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + cache.insert(1, 2).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), Some(2)); + + cache.insert(2, 3).expect("cache corrupted"); + assert_eq!(cache.get(&1).expect("cache corrupted"), None); + assert_eq!(cache.get(&2).expect("cache corrupted"), Some(3)); + } + + #[test] + fn test_lru_cache_capacity_one_eviction() { + let mut cache = LruCache::new(1); + + assert!(cache.insert(1, 1).expect("cache corrupted").is_none()); + let evicted = cache + .insert(2, 2) + .expect("cache corrupted") + .expect("expected eviction"); + assert_eq!(evicted, (1, 1)); + } + + #[test] + fn test_lru_cache_capacity_one_flush() { + let mut cache = LruCache::new(1); + + cache.insert(1, 1).expect("cache corrupted"); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .expect("cache corrupted") + .expect("flush failed"); + + assert_eq!(flushed, vec![(1, 1)]); + + cache.insert(2, 2).expect("cache corrupted"); + + let mut flushed = Vec::new(); + cache + .flush(|k, v| { + flushed.push((*k, v)); + Ok::<(), ()>(()) + }) + .expect("cache corrupted") + .expect("flush failed"); + + assert_eq!(flushed, vec![(2, 2)]); + } + /// Simple LRU implementation for testing pub struct SimpleLRU { pub cache: Vec>, From d64b3679a149e31a2fb71300a954c858d9ccc741 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Mar 2025 15:34:44 -0400 Subject: [PATCH 225/238] test: use default number of cases for proptests Since we switched to the safer fallible implementation, there 
is no need to spend so much time on these tests. --- stacks-common/src/util/lru_cache.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 749234163fb..486b1cba0a2 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -540,8 +540,6 @@ mod property_tests { } proptest! { - #![proptest_config(ProptestConfig::with_cases(1_000_000))] - #[test] fn doesnt_crash_with_random_operations(ops in prop::collection::vec(arbitrary_op(), 1..1000)) { let mut cache = LruCache::new(10); From 6713580d4d89ff8897f4140f573fe556e9b46c04 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Mar 2025 15:41:40 -0400 Subject: [PATCH 226/238] chore: update comment on `SimpleLRU` struct --- stacks-common/src/util/lru_cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 486b1cba0a2..7eb8b4eb731 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -461,7 +461,7 @@ mod tests { assert_eq!(flushed, vec![(2, 2)]); } - /// Simple LRU implementation for testing + /// Simple LRU implementation for property testing pub struct SimpleLRU { pub cache: Vec>, capacity: usize, From 9eba7469224042e6c20a51d557e469c24722737a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 28 Mar 2025 16:40:35 -0400 Subject: [PATCH 227/238] fix: flatten `Result` type of `LruCache::flush` --- stacks-common/src/util/lru_cache.rs | 43 ++++++++++++++++------------- stackslib/src/core/nonce_cache.rs | 7 +++-- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/stacks-common/src/util/lru_cache.rs b/stacks-common/src/util/lru_cache.rs index 7eb8b4eb731..5a7cdc30d00 100644 --- a/stacks-common/src/util/lru_cache.rs +++ b/stacks-common/src/util/lru_cache.rs @@ -51,6 +51,18 @@ impl std::fmt::Display for LruCacheCorrupted { impl std::error::Error for LruCacheCorrupted {} +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum FlushError { + LruCacheCorrupted, + FlushError(E), +} + +impl From for FlushError { + fn from(e: E) -> Self { + FlushError::FlushError(e) + } +} + /// LRU cache pub struct LruCache { capacity: usize, @@ -197,14 +209,12 @@ impl LruCache { pub fn flush( &mut self, mut f: impl FnMut(&K, V) -> Result<(), E>, - ) -> Result, LruCacheCorrupted> { + ) -> Result<(), FlushError> { for node in self.order.iter_mut().filter(|n| n.dirty) { - match f(&node.key, node.value) { - Ok(()) => node.dirty = false, - Err(e) => return Ok(Err(e)), - } + f(&node.key, node.value)?; + node.dirty = false; } - Ok(Ok(())) + Ok(()) } /// Helper function to remove a node from the linked list (by index) @@ -338,8 +348,7 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .expect("cache corrupted") - .expect("flush failed"); + .expect("cache corrupted or flush failed"); assert_eq!(flushed, vec![(1, 1)]); @@ -352,8 +361,7 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .expect("cache corrupted") - .expect("flush failed"); + .expect("cache corrupted or flush failed"); flushed.sort(); assert_eq!(flushed, vec![(1, 3), (2, 2)]); @@ -386,8 +394,7 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .expect("cache corrupted") - .expect("flush failed"); + .expect("cache corrupted or flush failed"); flushed.sort(); assert_eq!(flushed, [(2, 2), (3, 3)]); @@ -442,8 +449,7 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .expect("cache corrupted") - .expect("flush failed"); + 
.expect("cache corrupted or flush failed"); assert_eq!(flushed, vec![(1, 1)]); @@ -455,8 +461,7 @@ mod tests { flushed.push((*k, v)); Ok::<(), ()>(()) }) - .expect("cache corrupted") - .expect("flush failed"); + .expect("cache corrupted or flush failed"); assert_eq!(flushed, vec![(2, 2)]); } @@ -548,7 +553,7 @@ mod property_tests { CacheOp::Insert(k, v) => { cache.insert(k, v).expect("cache corrupted"); } CacheOp::Get(k) => { cache.get(&k).expect("cache corrupted"); } CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v).expect("cache corrupted"); } - CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted").expect("flush failed"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted or flush failed"); } } } } @@ -572,7 +577,7 @@ mod property_tests { CacheOp::Insert(k, v) => { cache.insert(k, v).expect("cache corrupted"); } CacheOp::Get(k) => { cache.get(&k).expect("cache corrupted"); } CacheOp::InsertClean(k, v) => { cache.insert_clean(k, v).expect("cache corrupted"); } - CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted").expect("flush failed"); } + CacheOp::Flush => { cache.flush(|_, _| Ok::<(), ()>(())).expect("cache corrupted or flush failed"); } } // Verify linked list integrity if !cache.order.is_empty() { @@ -617,7 +622,7 @@ mod property_tests { cache.flush(|k, v| { flushed.push((*k, v)); Ok::<(), ()>(()) - }).expect("cache corrupted").expect("flush failed"); + }).expect("cache corrupted or flush failed"); simple.flush(|k, v| { simple_flushed.push((*k, v)); Ok::<(), ()>(()) diff --git a/stackslib/src/core/nonce_cache.rs b/stackslib/src/core/nonce_cache.rs index 77e4aace970..a3fad4f843d 100644 --- a/stackslib/src/core/nonce_cache.rs +++ b/stackslib/src/core/nonce_cache.rs @@ -18,7 +18,7 @@ use std::thread; use std::time::Duration; use clarity::types::chainstate::StacksAddress; -use clarity::util::lru_cache::LruCache; +use clarity::util::lru_cache::{FlushError, LruCache, LruCacheCorrupted}; use clarity::vm::clarity::ClarityConnection; use rand::Rng; use rusqlite::params; @@ -181,13 +181,14 @@ impl NonceCache { tx.execute(sql, params![addr, nonce])?; Ok::<(), db_error>(()) }) { - Ok(inner) => inner?, - Err(_) => { + Ok(_) => {} + Err(FlushError::LruCacheCorrupted) => { drop(tx); // The cache is corrupt, reset it and return self.reset_cache(conn); return Ok(()); } + Err(FlushError::FlushError(e)) => return Err(e), }; tx.commit()?; From 80b4bbdff520414d60e82096d344c43fe63e143e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 31 Mar 2025 11:02:04 -0700 Subject: [PATCH 228/238] CRC: just send the update messages after processing an event Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 24 ++++++++++------- stacks-signer/src/v0/signer_state.rs | 30 +++++---------------- testnet/stacks-node/src/tests/epoch_205.rs | 3 +-- testnet/stacks-node/src/tests/signer/mod.rs | 7 ----- testnet/stacks-node/src/tests/signer/v0.rs | 25 +++++++++-------- 5 files changed, 33 insertions(+), 56 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 97d930fb067..4442df6d676 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -129,7 +129,7 @@ impl std::fmt::Display for Signer { impl SignerTrait for Signer { /// Create a new signer from the given configuration fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self { - let mut stackerdb = StackerDB::from(&signer_config); + let stackerdb = 
From 80b4bbdff520414d60e82096d344c43fe63e143e Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Mon, 31 Mar 2025 11:02:04 -0700
Subject: [PATCH 228/238] CRC: just send the update messages after processing
 an event

Signed-off-by: Jacinta Ferrant

---
 stacks-signer/src/v0/signer.rs              | 24 ++++++++++-------
 stacks-signer/src/v0/signer_state.rs        | 30 +++++----------------
 testnet/stacks-node/src/tests/epoch_205.rs  |  3 +--
 testnet/stacks-node/src/tests/signer/mod.rs |  7 -----
 testnet/stacks-node/src/tests/signer/v0.rs  | 25 +++++++++--------
 5 files changed, 33 insertions(+), 56 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 97d930fb067..4442df6d676 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -129,7 +129,7 @@ impl std::fmt::Display for Signer {
 impl SignerTrait<SignerMessage> for Signer {
     /// Create a new signer from the given configuration
     fn new(stacks_client: &StacksClient, signer_config: SignerConfig) -> Self {
-        let mut stackerdb = StackerDB::from(&signer_config);
+        let stackerdb = StackerDB::from(&signer_config);
         let mode = match signer_config.signer_mode {
             SignerConfigMode::DryRun => SignerMode::DryRun,
             SignerConfigMode::Normal { signer_id, .. } => SignerMode::Normal { signer_id },
@@ -141,12 +141,11 @@ impl SignerTrait<SignerMessage> for Signer {
             SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db");
         let proposal_config = ProposalEvalConfig::from(&signer_config);
 
-        let signer_state =
-            LocalStateMachine::new(&signer_db, &mut stackerdb, stacks_client, &proposal_config)
-                .unwrap_or_else(|e| {
-                    warn!("Failed to initialize local state machine for signer: {e:?}");
-                    LocalStateMachine::Uninitialized
-                });
+        let signer_state = LocalStateMachine::new(&signer_db, stacks_client, &proposal_config)
+            .unwrap_or_else(|e| {
+                warn!("Failed to initialize local state machine for signer: {e:?}");
+                LocalStateMachine::Uninitialized
+            });
         Self {
             private_key: signer_config.stacks_private_key,
             stackerdb,
@@ -213,8 +212,9 @@ impl SignerTrait<SignerMessage> for Signer {
             return;
         }
 
+        let prior_state = self.local_state_machine.clone();
         if self.reward_cycle <= current_reward_cycle {
-            self.local_state_machine.handle_pending_update(&self.signer_db, &mut self.stackerdb, stacks_client, &self.proposal_config)
+            self.local_state_machine.handle_pending_update(&self.signer_db, stacks_client, &self.proposal_config)
                 .unwrap_or_else(|e| error!("{self}: failed to update local state machine for pending update"; "err" => ?e));
         }
 
@@ -338,7 +338,7 @@ impl SignerTrait<SignerMessage> for Signer {
                     panic!("{self} Failed to write burn block event to signerdb: {e}");
                 });
                 self.local_state_machine
-                    .bitcoin_block_arrival(&self.signer_db, &mut self.stackerdb, stacks_client, &self.proposal_config, Some(*burn_height))
+                    .bitcoin_block_arrival(&self.signer_db, stacks_client, &self.proposal_config, Some(*burn_height))
                     .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest bitcoin block arrival"; "err" => ?e));
                 *sortition_state = None;
             }
@@ -360,7 +360,7 @@ impl SignerTrait<SignerMessage> for Signer {
                     "block_height" => block_height
                 );
                 self.local_state_machine
-                    .stacks_block_arrival(&mut self.stackerdb, consensus_hash, *block_height, block_id)
+                    .stacks_block_arrival(consensus_hash, *block_height, block_id)
                     .unwrap_or_else(|e| error!("{self}: failed to update local state machine for latest stacks block arrival"; "err" => ?e));
 
                 if let Ok(Some(mut block_info)) = self
@@ -382,6 +382,10 @@ impl SignerTrait<SignerMessage> for Signer {
                 }
             }
         }
+        if prior_state != self.local_state_machine {
+            self.local_state_machine
+                .send_signer_update_message(&mut self.stackerdb);
+        }
     }
 
     fn has_unprocessed_blocks(&self) -> bool {
diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
index f5f64cd0a01..fa13b6a4fa0 100644
--- a/stacks-signer/src/v0/signer_state.rs
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -144,12 +144,11 @@ impl LocalStateMachine {
     /// and signerdb for the current sortition information
     pub fn new(
         db: &SignerDb,
-        stackerdb: &mut StackerDB<MessageSlotID>,
         client: &StacksClient,
         proposal_config: &ProposalEvalConfig,
     ) -> Result<Self, SignerChainstateError> {
         let mut instance = Self::Uninitialized;
-        instance.bitcoin_block_arrival(db, stackerdb, client, proposal_config, None)?;
+        instance.bitcoin_block_arrival(db, client, proposal_config, None)?;
         Ok(instance)
     }
 
@@ -163,7 +162,7 @@ impl LocalStateMachine {
         }
     }
 
-    /// Send the local tate machine as an signer update message to stackerdb
+    /// Send the local state machine as a signer update message to stackerdb
     pub fn send_signer_update_message(&self, stackerdb: &mut StackerDB<MessageSlotID>) {
         let update: Result<StateMachineUpdateMessage, _> = self.try_into();
         match update {
@@ -182,21 +181,16 @@ impl LocalStateMachine {
     pub fn handle_pending_update(
         &mut self,
         db: &SignerDb,
-        stackerdb: &mut StackerDB<MessageSlotID>,
         client: &StacksClient,
         proposal_config: &ProposalEvalConfig,
     ) -> Result<(), SignerChainstateError> {
         let LocalStateMachine::Pending { update, .. } = self else {
-            return self.check_miner_inactivity(db, stackerdb, client, proposal_config);
+            return self.check_miner_inactivity(db, client, proposal_config);
         };
         match update.clone() {
-            StateMachineUpdate::BurnBlock(expected_burn_height) => self.bitcoin_block_arrival(
-                db,
-                stackerdb,
-                client,
-                proposal_config,
-                Some(expected_burn_height),
-            ),
+            StateMachineUpdate::BurnBlock(expected_burn_height) => {
+                self.bitcoin_block_arrival(db, client, proposal_config, Some(expected_burn_height))
+            }
         }
     }
 
@@ -237,7 +231,6 @@ impl LocalStateMachine {
     fn check_miner_inactivity(
         &mut self,
         db: &SignerDb,
-        stackerdb: &mut StackerDB<MessageSlotID>,
         client: &StacksClient,
         proposal_config: &ProposalEvalConfig,
     ) -> Result<(), SignerChainstateError> {
@@ -278,8 +271,6 @@ impl LocalStateMachine {
                         "inactive_tenure_ch" => %inactive_tenure_ch,
                         "new_active_tenure_ch" => %new_active_tenure_ch
                     );
-                    // We have updated our state, so let other signers know.
-                    self.send_signer_update_message(stackerdb);
                     Ok(())
                 } else {
                     warn!("Current miner timed out due to inactivity, but prior miner is not valid. Allowing current miner to continue");
@@ -347,7 +338,6 @@ impl LocalStateMachine {
     /// Handle a new stacks block arrival
     pub fn stacks_block_arrival(
         &mut self,
-        stackerdb: &mut StackerDB<MessageSlotID>,
         ch: &ConsensusHash,
         height: u64,
         block_id: &StacksBlockId,
@@ -403,8 +393,6 @@ impl LocalStateMachine {
         *parent_tenure_last_block = *block_id;
         *parent_tenure_last_block_height = height;
         *self = LocalStateMachine::Initialized(prior_state_machine);
-        // We updated the block id and/or the height. Let other signers know our view has changed
-        self.send_signer_update_message(stackerdb);
 
         Ok(())
     }
@@ -452,7 +440,6 @@ impl LocalStateMachine {
     pub fn bitcoin_block_arrival(
         &mut self,
         db: &SignerDb,
-        stackerdb: &mut StackerDB<MessageSlotID>,
         client: &StacksClient,
         proposal_config: &ProposalEvalConfig,
         mut expected_burn_height: Option<u64>,
@@ -538,11 +525,6 @@ impl LocalStateMachine {
             current_miner: miner_state,
             active_signer_protocol_version: prior_state_machine.active_signer_protocol_version,
         });
-
-        if prior_state != *self {
-            self.send_signer_update_message(stackerdb);
-        }
-
         Ok(())
     }
 }
diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs
index 85874739969..a6b47ccc518 100644
--- a/testnet/stacks-node/src/tests/epoch_205.rs
+++ b/testnet/stacks-node/src/tests/epoch_205.rs
@@ -14,8 +14,7 @@ use stacks::chainstate::stacks::{
 };
 use stacks::config::{EventKeyType, InitialBalance};
 use stacks::core::test_util::{
-    make_contract_call, make_contract_call_mblock_only, make_contract_publish,
-    make_contract_publish_microblock_only, to_addr,
+    make_contract_call, make_contract_call_mblock_only, make_contract_publish, to_addr,
 };
 use stacks::core::{
     self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0,
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index 91d38f6b70e..4f9ee34f37c 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -1220,13 +1220,6 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<SpawnedSigner<S, T>>
             (accepted.into())
             .expect("Failed to send accept signature");
     }
-
-    pub fn signer_public_keys(&self) -> Vec<StacksPublicKey> {
-        self.signer_stacks_private_keys
-            .iter()
-            .map(StacksPublicKey::from_private)
-            .collect()
-    }
 }
 
 fn setup_stx_btc_node(
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index ec45d523fa7..aa27fdaf6e5 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -771,7 +771,6 @@ impl MultipleMinerTest {
         contract_src: &str,
     ) -> String {
         let http_origin = self.node_http();
-        let sender_addr = tests::to_addr(&self.sender_sk);
         let contract_tx = make_contract_publish(
             &self.sender_sk,
             sender_nonce,
@@ -6095,7 +6094,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
         None,
         None,
     );
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
     let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap();
 
@@ -6265,7 +6264,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() {
         None,
         None,
     );
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
     let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap();
 
@@ -6485,7 +6484,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
     info!("Submitted tx {tx} in to attempt to mine block N+1");
     let block_n_1 = wait_for_block_proposal(30, info_before.stacks_tip_height + 1, &miner_pk)
         .expect("Timed out waiting for block N+1 to be proposed");
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     wait_for_block_global_acceptance_from_signers(
         30,
         &block_n_1.header.signer_signature_hash(),
@@ -6642,7 +6641,7 @@ fn continue_after_fast_block_no_sortition() {
     let burnchain = conf_1.get_burnchain();
     let sortdb = burnchain.open_sortition_db(true).unwrap();
 
-    let all_signers = miners.signer_test.signer_public_keys();
+    let all_signers = miners.signer_test.signer_test_pks();
     let get_burn_height = || {
         SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
             .unwrap()
@@ -6930,7 +6929,7 @@ fn signing_in_0th_tenure_of_reward_cycle() {
     info!("------------------------- Test Setup -------------------------");
     let num_signers = 5;
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![]);
-    let signer_public_keys = signer_test.signer_public_keys();
+    let signer_public_keys = signer_test.signer_test_pks();
     let long_timeout = Duration::from_secs(200);
     signer_test.boot_to_epoch_3();
     let curr_reward_cycle = signer_test.get_current_reward_cycle();
@@ -7198,7 +7197,7 @@ fn block_commit_delay() {
         .expect("Timed out waiting for block commit after new Stacks block");
 
     // Prevent a block from being mined by making signers reject it.
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     TEST_REJECT_ALL_BLOCK_PROPOSAL.set(all_signers);
 
     info!("------------------------- Test Mine Burn Block  -------------------------");
@@ -7464,7 +7463,7 @@ fn block_validation_check_rejection_timeout_heuristic() {
     );
     let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap();
     let miner_pk = StacksPublicKey::from_private(&miner_sk);
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
 
     signer_test.boot_to_epoch_3();
 
@@ -7682,7 +7681,7 @@ fn block_validation_pending_table() {
         .expect("Timed out waiting for pending block validation to be removed");
 
     // for test cleanup we need to wait for block rejections
-    let signer_keys = signer_test.signer_public_keys();
+    let signer_keys = signer_test.signer_test_pks();
     wait_for_block_rejections_from_signers(30, &block.header.signer_signature_hash(), &signer_keys)
         .expect("Timed out waiting for block rejections");
 
@@ -8411,7 +8410,7 @@ fn global_acceptance_depends_on_block_announcement() {
         None,
     );
 
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     let miner_sk = signer_test.running_nodes.conf.miner.mining_key.unwrap();
     let miner_pk = StacksPublicKey::from_private(&miner_sk);
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
@@ -8914,7 +8913,7 @@ fn incoming_signers_ignore_block_proposals() {
     info!("------------------------- Test Attempt to Mine Invalid Block {signer_signature_hash_1} -------------------------");
 
     let short_timeout = Duration::from_secs(30);
-    let all_signers = signer_test.signer_public_keys();
+    let all_signers = signer_test.signer_test_pks();
     test_observer::clear();
 
     // Propose a block to the signers that passes initial checks but will be rejected by the stacks node
@@ -10711,7 +10710,7 @@ fn interrupt_miner_on_new_stacks_tip() {
     let (miner_pk_1, miner_pk_2) = miners.get_miner_public_keys();
     let (miner_pkh_1, miner_pkh_2) = miners.get_miner_public_key_hashes();
 
-    let all_signers = miners.signer_test.signer_public_keys();
+    let all_signers = miners.signer_test.signer_test_pks();
 
     // Pause Miner 2's commits to ensure Miner 1 wins the first sortition.
     skip_commit_op_rl2.set(true);
@@ -11936,7 +11935,7 @@ fn mark_miner_as_invalid_if_reorg_is_rejected() {
             config.miner.block_commit_delay = Duration::from_secs(0);
         },
     );
-    let all_signers = miners.signer_test.signer_public_keys();
+    let all_signers = miners.signer_test.signer_test_pks();
     let mut approving_signers = vec![];
     let mut rejecting_signers = vec![];
     for (i, signer_config) in miners.signer_test.signer_configs.iter().enumerate() {
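The next patch drops a `.clone()` in `bitcoin_block_arrival`, which works because the function has already taken ownership of the machine with `std::mem::replace`, leaving `Self::Uninitialized` behind; the owned value can then be matched and consumed directly. A minimal sketch of the idiom, where the `StateMachine` enum and `reinitialize` function are illustrative rather than the signer's actual types:

#[derive(Debug)]
enum StateMachine {
    Uninitialized,
    Initialized(String), // payload stands in for the real signer state
}

fn reinitialize(state: &mut StateMachine) {
    // Take ownership of the current state, leaving a placeholder behind,
    // so an early return would leave `state` explicitly Uninitialized.
    let prior = std::mem::replace(state, StateMachine::Uninitialized);

    // Because `prior` is owned, it can be matched by value; no clone needed.
    let payload = match prior {
        StateMachine::Uninitialized => String::from("fresh"),
        StateMachine::Initialized(existing) => existing,
    };

    *state = StateMachine::Initialized(payload);
}

fn main() {
    let mut state = StateMachine::Uninitialized;
    reinitialize(&mut state);
    println!("{state:?}"); // Initialized("fresh")
}

Replacing first also means a mid-function error leaves the machine in a well-defined state rather than a stale one, which is the same rationale the in-code comment gives.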
From f876d4da951f39bf12d18edfe2f0d5c30171cc90 Mon Sep 17 00:00:00 2001
From: Jacinta Ferrant
Date: Mon, 31 Mar 2025 11:06:37 -0700
Subject: [PATCH 229/238] Remove unnecessary clone

Signed-off-by: Jacinta Ferrant

---
 stacks-signer/src/v0/signer_state.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/stacks-signer/src/v0/signer_state.rs b/stacks-signer/src/v0/signer_state.rs
index fa13b6a4fa0..3339f1645b1 100644
--- a/stacks-signer/src/v0/signer_state.rs
+++ b/stacks-signer/src/v0/signer_state.rs
@@ -447,7 +447,7 @@ impl LocalStateMachine {
         // set self to uninitialized so that if this function errors,
         // self is left as uninitialized.
         let prior_state = std::mem::replace(self, Self::Uninitialized);
-        let prior_state_machine = match prior_state.clone() {
+        let prior_state_machine = match prior_state {
             // if the local state machine was uninitialized, just initialize it
             LocalStateMachine::Uninitialized => Self::place_holder(),
             LocalStateMachine::Initialized(signer_state_machine) => signer_state_machine,
@@ -525,6 +525,7 @@ impl LocalStateMachine {
             current_miner: miner_state,
             active_signer_protocol_version: prior_state_machine.active_signer_protocol_version,
         });
+
         Ok(())
     }
 }

From 905e8f437b2ec47be40ff1e90b1bfb1fa36bfc07 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:05:52 +0300
Subject: [PATCH 230/238] Update github-release.yml

---
 .github/workflows/github-release.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index 75155649b6e..cc5c8a93cda 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -95,6 +95,8 @@ jobs:
     runs-on: ubuntu-latest
     needs:
       - build-binaries
+    permissions:
+      contents: write
     steps:
       ## Creates releases
       - name: Create Release
@@ -106,7 +108,7 @@ jobs:
           signer_docker_tag: ${{ inputs.signer_docker_tag }}
           is_node_release: ${{ inputs.is_node_release }}
           is_signer_release: ${{ inputs.is_signer_release }}
-          GH_TOKEN: ${{ secrets.GH_TOKEN }}
+          GH_TOKEN: ${{ github.TOKEN }}
 
 
 ## Builds arch dependent Docker images from binaries
From be36d72c1a6015eea46a3b4668a27f0e640315bf Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:06:48 +0300
Subject: [PATCH 231/238] Update ci.yml

---
 .github/workflows/ci.yml | 143 ---------------------------------------
 1 file changed, 143 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e32148c06fc..c121f2e6371 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -103,146 +103,3 @@ jobs:
       is_node_release: ${{ needs.check-release.outputs.is_node_release }}
       is_signer_release: ${{ needs.check-release.outputs.is_signer_release }}
       secrets: inherit
-
-  ## Build and push Debian image built from source
-  ##
-  ## Runs when:
-  ##   - it is not a node or signer-only release run
-  docker-image:
-    if: |
-      needs.check-release.outputs.is_node_release != 'true' ||
-      needs.check-release.outputs.is_signer_release != 'true'
-    name: Docker Image (Source)
-    uses: ./.github/workflows/image-build-source.yml
-    needs:
-      - rustfmt
-      - check-release
-    secrets: inherit
-
-  ## Create a reusable cache for tests
-  ##
-  ## Runs when:
-  ##   - it is a node release run
-  ##     or any of:
-  ##       - this workflow is called manually
-  ##       - PR is opened
-  ##       - PR added to merge queue
-  create-cache:
-    if: |
-      needs.check-release.outputs.is_node_release == 'true' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request' ||
-      github.event_name == 'merge_group'
-    name: Create Test Cache
-    needs:
-      - rustfmt
-      - check-release
-    uses: ./.github/workflows/create-cache.yml
-
-  ## Tests to run regularly
-  ##
-  ## Runs when:
-  ##   - it is a node or signer-only release run
-  ##     or any of:
-  ##       - this workflow is called manually
-  ##       - PR is opened
-  ##       - PR added to merge queue
-  stacks-core-tests:
-    if: |
-      needs.check-release.outputs.is_node_release == 'true' ||
-      needs.check-release.outputs.is_signer_release == 'true' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request' ||
-      github.event_name == 'merge_group'
-    name: Stacks Core Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/stacks-core-tests.yml
-
-  ## Checks to run on built binaries
-  ##
-  ## Runs when:
-  ##   - it is a node or signer-only release run
-  ##     or any of:
-  ##       - this workflow is called manually
-  ##       - PR is opened
-  ##       - PR added to merge queue
-  stacks-core-build-tests:
-    if: |
-      needs.check-release.outputs.is_node_release == 'true' ||
-      needs.check-release.outputs.is_signer_release == 'true' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request' ||
-      github.event_name == 'merge_group'
-    name: Stacks Core Build Tests
-    needs:
-      - rustfmt
-      - check-release
-    uses: ./.github/workflows/core-build-tests.yml
-
-  ## Checks to run on built binaries
-  ##
-  ## Runs when:
-  ##   - it is a node release run
-  ##     or any of:
-  ##       - this workflow is called manually
-  ##       - PR is opened
-  ##       - PR added to merge queue
-  bitcoin-tests:
-    if: |
-      needs.check-release.outputs.is_node_release == 'true' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request' ||
-      github.event_name == 'merge_group'
-    name: Bitcoin Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/bitcoin-tests.yml
-
-  p2p-tests:
-    if: |
-      needs.check-release.outputs.is_node_release == 'true' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request' ||
-      github.event_name == 'merge_group'
-    name: P2P Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/p2p-tests.yml
-
-  ## Test to run on a tagged release
-  ##
-  ## Runs when:
-  ##   - it is a node release run
-  atlas-tests:
-    if: needs.check-release.outputs.is_node_release == 'true'
-    name: Atlas Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/atlas-tests.yml
-
-  epoch-tests:
-    if: needs.check-release.outputs.is_node_release == 'true'
-    name: Epoch Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/epoch-tests.yml
-
-  slow-tests:
-    if: needs.check-release.outputs.is_node_release == 'true'
-    name: Slow Tests
-    needs:
-      - rustfmt
-      - create-cache
-      - check-release
-    uses: ./.github/workflows/slow-tests.yml
From 45d5729050ce59a206b077c1aa62c81f2943e7aa Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:09:21 +0300
Subject: [PATCH 232/238] Update github-release.yml

---
 .github/workflows/github-release.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index cc5c8a93cda..ffee320b826 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -158,9 +158,11 @@ jobs:
       - build-binaries
       - create-release
       - docker-image
+    permissions:
+      contents: write
     steps:
       - name: Open Downstream PR
         id: create-pr
         uses: stacks-network/actions/stacks-core/release/downstream-pr@main
         with:
-          token: ${{ secrets.GH_TOKEN }}
+          token: ${{ github.TOKEN }}

From 285b20bf6727e67dbfd2253a7df6685a0a2f5f05 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:09:47 +0300
Subject: [PATCH 233/238] Update versions.toml

---
 versions.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/versions.toml b/versions.toml
index 138c89c7173..d33360ad171 100644
--- a/versions.toml
+++ b/versions.toml
@@ -1,4 +1,4 @@
 # Update these values when a new release is created.
 # `stacks-common/build.rs` will automatically update `versions.rs` with these values.
-stacks_node_version = "3.1.0.0.7"
-stacks_signer_version = "3.1.0.0.7.0"
+stacks_node_version = "6.3.0.0.0"
+stacks_signer_version = "6.3.0.0.0.2"

From 3f3d73255011cb873f870c243c1256ec6d997c48 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:11:54 +0300
Subject: [PATCH 234/238] Update ci.yml

---
 .github/workflows/ci.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c121f2e6371..d97393761ac 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -102,4 +102,3 @@ jobs:
       signer_docker_tag: ${{ needs.check-release.outputs.signer_docker_tag }}
       is_node_release: ${{ needs.check-release.outputs.is_node_release }}
       is_signer_release: ${{ needs.check-release.outputs.is_signer_release }}
-      secrets: inherit

From e2b60485c784e4de3471d65f5257b3181ab51748 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:12:09 +0300
Subject: [PATCH 235/238] Update github-release.yml

---
 .github/workflows/github-release.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index ffee320b826..11e994e798d 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -29,9 +29,6 @@ on:
         description: "True if it is a signer release"
         required: true
         type: string
-    secrets:
-      GH_TOKEN:
-        required: true
 
 concurrency:
   group: github-release-${{ github.head_ref || github.ref }}
From 48c54c45a3cd3f2f46972bd043f35ad87fa5e262 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:12:41 +0300
Subject: [PATCH 236/238] Update github-release.yml

---
 .github/workflows/github-release.yml | 62 ++++++++++++++----------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index 11e994e798d..4c5bfa8d586 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -113,36 +113,36 @@ jobs:
   ## Note: this step requires the binaries in the create-release step to be uploaded
   ## Runs when the following is true:
   ##  - either node or signer tag is provided
-  docker-image:
-    if: |
-      inputs.node_tag != '' ||
-      inputs.signer_tag != ''
-    name: Docker Image (Binary)
-    runs-on: ubuntu-latest
-    needs:
-      - build-binaries
-      - create-release
-    strategy:
-      fail-fast: false
-      ## Build a maximum of 2 images concurrently based on matrix.dist
-      max-parallel: 2
-      matrix:
-        dist:
-          - alpine
-          - debian
-    steps:
-      - name: Create Docker Image
-        uses: stacks-network/actions/stacks-core/release/docker-images@main
-        with:
-          node_tag: ${{ inputs.node_tag }}
-          node_docker_tag: ${{ inputs.node_docker_tag }}
-          signer_tag: ${{ inputs.signer_tag }}
-          signer_docker_tag: ${{ inputs.signer_docker_tag }}
-          is_node_release: ${{ inputs.is_node_release }}
-          is_signer_release: ${{ inputs.is_signer_release }}
-          DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
-          DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
-          dist: ${{ matrix.dist }}
+  # docker-image:
+  #   if: |
+  #     inputs.node_tag != '' ||
+  #     inputs.signer_tag != ''
+  #   name: Docker Image (Binary)
+  #   runs-on: ubuntu-latest
+  #   needs:
+  #     - build-binaries
+  #     - create-release
+  #   strategy:
+  #     fail-fast: false
+  #     ## Build a maximum of 2 images concurrently based on matrix.dist
+  #     max-parallel: 2
+  #     matrix:
+  #       dist:
+  #         - alpine
+  #         - debian
+  #   steps:
+  #     - name: Create Docker Image
+  #       uses: stacks-network/actions/stacks-core/release/docker-images@main
+  #       with:
+  #         node_tag: ${{ inputs.node_tag }}
+  #         node_docker_tag: ${{ inputs.node_docker_tag }}
+  #         signer_tag: ${{ inputs.signer_tag }}
+  #         signer_docker_tag: ${{ inputs.signer_docker_tag }}
+  #         is_node_release: ${{ inputs.is_node_release }}
+  #         is_signer_release: ${{ inputs.is_signer_release }}
+  #         DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
+  #         DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
+  #         dist: ${{ matrix.dist }}
 
   ## Create the downstream PR for the release branch to master,develop
   create-pr:
@@ -154,7 +154,7 @@ jobs:
     needs:
       - build-binaries
      - create-release
-      - docker-image
+      # - docker-image
     permissions:
       contents: write
     steps:

From f5e1be47a3a86a7de987ddfe8352371e361638db Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Tue, 1 Apr 2025 18:44:27 +0300
Subject: [PATCH 237/238] Update github-release.yml

---
 .github/workflows/github-release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index 4c5bfa8d586..6f4ab39f4bc 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -156,7 +156,7 @@ jobs:
       - create-release
       # - docker-image
     permissions:
-      contents: write
+      pull-requests: write
     steps:
       - name: Open Downstream PR
         id: create-pr
From 0410ee540356235fca871d21d5cdc9f06586a618 Mon Sep 17 00:00:00 2001
From: BowTiedDevOps <157840260+BowTiedDevOps@users.noreply.github.com>
Date: Wed, 2 Apr 2025 16:22:24 +0300
Subject: [PATCH 238/238] feat: change token name in release workflow

---
 .github/workflows/github-release.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml
index 6f4ab39f4bc..b796f36bee0 100644
--- a/.github/workflows/github-release.yml
+++ b/.github/workflows/github-release.yml
@@ -105,7 +105,7 @@ jobs:
           signer_docker_tag: ${{ inputs.signer_docker_tag }}
           is_node_release: ${{ inputs.is_node_release }}
           is_signer_release: ${{ inputs.is_signer_release }}
-          GH_TOKEN: ${{ github.TOKEN }}
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 
 
 ## Builds arch dependent Docker images from binaries
@@ -162,4 +162,4 @@ jobs:
         id: create-pr
         uses: stacks-network/actions/stacks-core/release/downstream-pr@main
         with:
-          token: ${{ github.TOKEN }}
+          token: ${{ secrets.GITHUB_TOKEN }}