From 94b5ce88ed95c775557180b2f118b04af6fc4a3a Mon Sep 17 00:00:00 2001 From: janniks Date: Tue, 20 Feb 2024 15:57:39 +0100 Subject: [PATCH 001/109] fix: include txid in failure logs --- .../src/chainstate/stacks/db/transactions.rs | 59 ++++++++++++++----- stackslib/src/chainstate/stacks/miner.rs | 2 +- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e5e0bf19e9..85da970311 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -550,6 +550,7 @@ impl StacksChainState { post_condition_mode: &TransactionPostConditionMode, origin_account: &StacksAccount, asset_map: &AssetMap, + txid: Txid, ) -> Result { let mut checked_fungible_assets: HashMap> = HashMap::new(); @@ -578,7 +579,7 @@ impl StacksChainState { if !condition_code.check(*amount_sent_condition as u128, amount_sent) { info!( "Post-condition check failure on STX owned by {}: {:?} {:?} {}", - account_principal, amount_sent_condition, condition_code, amount_sent + account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid ); return Ok(false); } @@ -622,7 +623,7 @@ impl StacksChainState { .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); if !condition_code.check(*amount_sent_condition as u128, amount_sent) { - info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent); + info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid); return Ok(false); } @@ -656,7 +657,7 @@ impl StacksChainState { .get_nonfungible_tokens(&account_principal, &asset_id) .unwrap_or(&empty_assets); if !condition_code.check(asset_value, assets_sent) { - info!("Post-condition check 
failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code); + info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code; "txid" => %txid); return Ok(false); } @@ -698,18 +699,18 @@ impl StacksChainState { // each value must be covered for v in values { if !nfts.contains(&v.clone().try_into()?) { - info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal); + info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal; "txid" => %txid); return Ok(false); } } } else { // no values covered - info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { // no NFT for this principal - info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -719,11 +720,11 @@ impl StacksChainState { checked_fungible_assets.get(&principal) { if !checked_ft_asset_ids.contains(&asset_identifier) { - info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { - info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check 
failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -950,14 +951,14 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); - warn!("{}", &msg); + warn!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } if *addr == origin_account.principal { let msg = format!("Invalid TokenTransfer: address tried to send to itself"); - warn!("{}", &msg); + warn!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1009,6 +1010,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -1026,7 +1028,8 @@ impl StacksChainState { "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), "return_value" => %return_value, - "cost" => ?total_cost); + "cost" => ?total_cost, + "txid" => %tx.txid()); (return_value, asset_map, events) } Err(e) => match handle_clarity_runtime_error(e) { @@ -1035,14 +1038,16 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => ?error); + "error" => ?error, + "txid" => %tx.txid()); (Value::err_none(), AssetMap::new(), vec![]) } ClarityRuntimeTxError::AbortedByCallback(value, assets, events) => { info!("Contract-call aborted by post-condition"; "contract_name" => %contract_id, "function_name" => %contract_call.function_name, - "function_args" => %VecDisplay(&contract_call.function_args)); + "function_args" => %VecDisplay(&contract_call.function_args), + "txid" => %tx.txid()); let receipt = 
StacksTransactionReceipt::from_condition_aborted_contract_call( tx.clone(), events, @@ -1063,7 +1068,8 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => %check_error); + "error" => %check_error, + "txid" => %tx.txid()); let receipt = StacksTransactionReceipt::from_runtime_failure_contract_call( @@ -1078,7 +1084,8 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => %check_error); + "error" => %check_error, + "txid" => %tx.txid()); return Err(Error::ClarityError(clarity_error::Interpreter( InterpreterError::Unchecked(check_error), ))); @@ -1089,7 +1096,8 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => ?e); + "error" => ?e, + "txid" => %tx.txid()); return Err(Error::ClarityError(e)); } }, @@ -1226,6 +1234,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -6796,6 +6805,12 @@ pub mod test { mode, origin, &ft_transfer_2, + Txid( + "1232121232121232121232121232121232121232121232121232121232121232" + .as_bytes() + .try_into() + .unwrap(), + ), ) .unwrap(); if result != expected_result { @@ -7149,6 +7164,12 @@ pub mod test { mode, origin, &nft_transfer_2, + Txid( + "1232121232121232121232121232121232121232121232121232121232121232" + .as_bytes() + .try_into() + .unwrap(), + ), ) .unwrap(); if result != expected_result { @@ -7966,6 +7987,12 @@ pub mod test { post_condition_mode, origin_account, asset_map, + Txid( + "1232121232121232121232121232121232121232121232121232121232121232" + .as_bytes() + .try_into() + .unwrap(), + ), ) .unwrap(); if result != expected_result { diff 
--git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index fd664a7dbf..04c3d7a2b5 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -548,7 +548,7 @@ impl TransactionResult { // recover original ClarityError ClarityRuntimeTxError::Acceptable { error, .. } => { if let clarity_error::Parse(ref parse_err) = error { - info!("Parse error: {}", parse_err); + info!("Parse error: {}", parse_err; "txid" => %tx.txid()); match &parse_err.err { ParseErrors::ExpressionStackDepthTooDeep | ParseErrors::VaryExpressionStackDepthTooDeep => { From 5f1173a647eef94ea9ba702683efeb9955a158b7 Mon Sep 17 00:00:00 2001 From: janniks Date: Mon, 6 May 2024 18:43:55 +0800 Subject: [PATCH 002/109] test: ensure auto-generated test contract names are not too long --- stackslib/src/net/mod.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index ddf0e6a713..4cea3a684d 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1656,6 +1656,7 @@ pub mod test { use std::{fs, io, thread}; use clarity::boot_util::boot_code_id; + use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; @@ -2403,7 +2404,17 @@ pub mod test { let smart_contract = TransactionPayload::SmartContract( TransactionSmartContract { name: ContractName::try_from( - conf.test_name.replace("::", "-").to_string(), + conf.test_name + .replace("::", "-") + .to_string() + .chars() + // ensure auto-generated contract names are not too long + .skip( + conf.test_name + .len() + .saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::(), ) .expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) From 86dd3df250db8ee6c11d5a43c0aed41b99a94b98 Mon Sep 17 00:00:00 2001 From: janniks Date: Mon, 6 May 
2024 19:12:56 +0800 Subject: [PATCH 003/109] test: remove leading dashes from auto-generated test contract names --- stackslib/src/net/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 4cea3a684d..9ba37f252a 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2414,7 +2414,9 @@ pub mod test { .len() .saturating_sub(CONTRACT_MAX_NAME_LENGTH), ) - .collect::(), + .collect::() + .trim_start_matches('-') // Remove leading '-' + .to_string(), ) .expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) From 86eea3f4bbe2dd0ccf0cef51a6b12fc588667c15 Mon Sep 17 00:00:00 2001 From: janniks Date: Mon, 6 May 2024 19:44:32 +0800 Subject: [PATCH 004/109] test: trim leading non-alpha from test name --- stackslib/src/net/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 9ba37f252a..6b87af5c4c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2415,7 +2415,7 @@ pub mod test { .saturating_sub(CONTRACT_MAX_NAME_LENGTH), ) .collect::() - .trim_start_matches('-') // Remove leading '-' + .trim_start_matches(|c: char| !c.is_alphabetic()) .to_string(), ) .expect("FATAL: invalid boot-code contract name"), From 464b319f94236c52996eaf6d863073595b607234 Mon Sep 17 00:00:00 2001 From: janniks Date: Mon, 14 Oct 2024 15:44:00 +0200 Subject: [PATCH 005/109] chore: address pr review comments --- stackslib/src/net/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 3dbb615157..676f13f18c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2487,7 +2487,7 @@ pub mod test { name: ContractName::try_from( conf.test_name .replace("::", "-") - .to_string() + .trim_start_matches(|c: char| !c.is_alphabetic()) .chars() // ensure auto-generated contract 
names are not too long .skip( @@ -2495,9 +2495,7 @@ pub mod test { .len() .saturating_sub(CONTRACT_MAX_NAME_LENGTH), ) - .collect::() - .trim_start_matches(|c: char| !c.is_alphabetic()) - .to_string(), + .collect::(), ) .expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) From 8a36a278704bf4e656d61bdd236d7867a806cf43 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Tue, 22 Oct 2024 10:29:03 -0700 Subject: [PATCH 006/109] feat: allow pretty print logging in tests --- stacks-common/src/util/log.rs | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 9d52f0dbbf..e86ed7f44b 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -215,14 +215,16 @@ fn make_json_logger() -> Logger { panic!("Tried to construct JSON logger, but stacks-blockchain built without slog_json feature enabled.") } -#[cfg(not(any(test, feature = "testing")))] fn make_logger() -> Logger { if env::var("STACKS_LOG_JSON") == Ok("1".into()) { make_json_logger() } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); + #[cfg(not(any(test, feature = "testing")))] let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); + #[cfg(any(test, feature = "testing"))] + let decorator = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -230,20 +232,6 @@ fn make_logger() -> Logger { } } -#[cfg(any(test, feature = "testing"))] -fn make_logger() -> Logger { - if env::var("STACKS_LOG_JSON") == Ok("1".into()) { - make_json_logger() - } else { - let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); - let plain = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); - let 
isatty = isatty(Stream::Stdout); - let drain = TermFormat::new(plain, false, debug, isatty); - let logger = Logger::root(drain.ignore_res(), o!()); - logger - } -} - fn inner_get_loglevel() -> slog::Level { if env::var("STACKS_LOG_TRACE") == Ok("1".into()) { slog::Level::Trace From f5911fbf181d0470efedaaabe2ec05720dfae26a Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 24 Oct 2024 07:30:16 -0700 Subject: [PATCH 007/109] fix: make get_decorator its own function --- stacks-common/src/util/log.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index e86ed7f44b..534f3f9969 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -221,10 +221,7 @@ fn make_logger() -> Logger { } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); - #[cfg(not(any(test, feature = "testing")))] - let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); - #[cfg(any(test, feature = "testing"))] - let decorator = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); + let decorator = get_decorator(); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -232,6 +229,16 @@ fn make_logger() -> Logger { } } +#[cfg(any(test, feature = "testing"))] +fn get_decorator() -> slog_term::PlainSyncDecorator { + slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter) +} + +#[cfg(not(any(test, feature = "testing")))] +fn get_decorator() -> slog_term::PlainSyncDecorator { + slog_term::PlainSyncDecorator::new(std::io::stderr()) +} + fn inner_get_loglevel() -> slog::Level { if env::var("STACKS_LOG_TRACE") == Ok("1".into()) { slog::Level::Trace From f947ea04446690bf1df2bca7de2197c1ed66460b Mon Sep 17 00:00:00 2001 From: janniks Date: Thu, 24 Oct 2024 16:46:43 +0200 Subject: 
[PATCH 008/109] fix: remove duplicate txid log --- stackslib/src/chainstate/stacks/db/transactions.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 4cd81dc679..42b69fb9a2 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1061,8 +1061,7 @@ impl StacksChainState { "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), "return_value" => %return_value, - "cost" => ?total_cost, - "txid" => %tx.txid()); + "cost" => ?total_cost); (return_value, asset_map, events) } Err(e) => match handle_clarity_runtime_error(e) { From ed415b938e99d56c500245a4ca712413a0346cf4 Mon Sep 17 00:00:00 2001 From: janniks Date: Thu, 24 Oct 2024 16:51:18 +0200 Subject: [PATCH 009/109] fix: remove duplicate txid log --- .../src/chainstate/stacks/db/transactions.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 42b69fb9a2..456f544645 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -1073,8 +1073,7 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => ?error, - "txid" => %tx.txid()); + "error" => ?error); (Value::err_none(), AssetMap::new(), vec![]) } ClarityRuntimeTxError::AbortedByCallback(value, assets, events) => { @@ -1084,8 +1083,7 @@ impl StacksChainState { "origin_nonce" => %origin_account.nonce, "contract_name" => %contract_id, "function_name" => %contract_call.function_name, - "function_args" => %VecDisplay(&contract_call.function_args), - "txid" => %tx.txid()); + "function_args" => 
%VecDisplay(&contract_call.function_args)); let receipt = StacksTransactionReceipt::from_condition_aborted_contract_call( tx.clone(), events, @@ -1109,8 +1107,7 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => %check_error, - "txid" => %tx.txid()); + "error" => %check_error); let receipt = StacksTransactionReceipt::from_runtime_failure_contract_call( @@ -1128,8 +1125,7 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => %check_error, - "txid" => %tx.txid()); + "error" => %check_error); return Err(Error::ClarityError(clarity_error::Interpreter( InterpreterError::Unchecked(check_error), ))); @@ -1143,8 +1139,7 @@ impl StacksChainState { "contract_name" => %contract_id, "function_name" => %contract_call.function_name, "function_args" => %VecDisplay(&contract_call.function_args), - "error" => ?e, - "txid" => %tx.txid()); + "error" => ?e); return Err(Error::ClarityError(e)); } }, From e37f5e0ccabc881b7c99e852f461747ad7bdd877 Mon Sep 17 00:00:00 2001 From: janniks Date: Thu, 24 Oct 2024 17:33:11 +0200 Subject: [PATCH 010/109] fix: move non-alpha trim as last step --- stackslib/src/net/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 7c99e737a4..d279018d39 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2489,15 +2489,15 @@ pub mod test { name: ContractName::try_from( conf.test_name .replace("::", "-") - .trim_start_matches(|c: char| !c.is_alphabetic()) .chars() - // ensure auto-generated contract names are not too long .skip( conf.test_name .len() .saturating_sub(CONTRACT_MAX_NAME_LENGTH), ) - .collect::(), + .collect::() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), ) 
.expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) From c3dbc5d2678ad342ec4ebe9a26628373adacb22b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 25 Oct 2024 17:25:06 -0700 Subject: [PATCH 011/109] Store the rejected block in the database in testing directive case Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 64 ++++++++++++++-------- testnet/stacks-node/src/tests/signer/v0.rs | 2 + 2 files changed, 42 insertions(+), 24 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2cb10a9817..df78c5cc7d 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -428,30 +428,8 @@ impl Signer { }; #[cfg(any(test, feature = "testing"))] - let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { - Some(public_keys) => { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - )) - } else { - None - } - } - None => block_response, - }; + let block_response = + self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { // We know proposal is invalid. 
Send rejection message, do not do further validation @@ -935,6 +913,44 @@ impl Signer { false } + #[cfg(any(test, feature = "testing"))] + fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { + return block_response; + }; + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. 
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d0f3dfff83..976ebc2cd0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4246,6 +4246,8 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); + // Make a new stacks transaction to create a different block signature, but make sure to propose it + // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, From c8d00ab7fcfde6507ab9778227258db6b310fd62 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 28 Oct 2024 14:01:46 -0700 Subject: [PATCH 012/109] Wait for the tip to update before proceeding Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d0f3dfff83..2958ea1382 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4841,7 +4841,13 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // a tenure has begun, so wait until we mine a block wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before) + let new_height = signer_test + .stacks_client + 
.get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && new_height > info_before.stacks_tip_height) }) .expect("Timed out waiting for block to be mined and processed"); From 28fb59b18cba896a3e8660932c5f33e4d6e7da23 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 28 Oct 2024 17:04:51 -0400 Subject: [PATCH 013/109] fix: drain the relayer channel to alleviate block download pressure --- testnet/stacks-node/src/config.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 83 ++++++++++++++----- 2 files changed, 64 insertions(+), 21 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0beed9471d..b9c3197541 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1956,7 +1956,7 @@ impl Default for NodeConfig { max_microblocks: u16::MAX as u64, wait_time_for_microblocks: 30_000, wait_time_for_blocks: 30_000, - next_initiative_delay: 10_000, + next_initiative_delay: 1_000, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: true, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..eb7d7db079 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -17,7 +17,7 @@ use core::fmt; use std::collections::HashSet; use std::fs; use std::io::Read; -use std::sync::mpsc::{Receiver, RecvTimeoutError}; +use std::sync::mpsc::{Receiver, RecvTimeoutError, TryRecvError}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -1198,7 +1198,7 @@ impl RelayerThread { while self.globals.keep_running() { let raised_initiative = self.globals.take_initiative(); let timed_out = Instant::now() >= self.next_initiative; - let directive = if raised_initiative.is_some() || timed_out { + let mut initiative_directive = if raised_initiative.is_some() || timed_out { 
self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1206,28 +1206,71 @@ impl RelayerThread { None }; - let directive = if let Some(directive) = directive { - directive - } else { - match relay_rcv.recv_timeout(Duration::from_millis( - self.config.node.next_initiative_delay, - )) { - Ok(directive) => directive, - Err(RecvTimeoutError::Timeout) => { - continue; + let mut handled = true; + let mut disconnect = false; + let mut try_recv = true; + let mut drained = false; + let raised_initiative_fmt = + format!("{}", raised_initiative.unwrap_or("relay_rcv".to_string())); + + debug!("Relayer: drain channel"); + // drain the channel + while !disconnect && handled && !drained { + let directive = if let Some(directive) = initiative_directive.take() { + debug!("Relayer: initiative from directive"); + directive + } else if try_recv { + // drain the channel + match relay_rcv.try_recv() { + Ok(directive) => { + debug!("Relayer: initiative from try_recv"); + directive + } + Err(TryRecvError::Empty) => { + try_recv = false; + continue; + } + Err(TryRecvError::Disconnected) => { + disconnect = true; + break; + } } - Err(RecvTimeoutError::Disconnected) => { - break; + } else { + // channel was drained, so do a time-bound recv + match relay_rcv.recv_timeout(Duration::from_millis( + self.config.node.next_initiative_delay, + )) { + Ok(directive) => { + // only do this once, so we can call .initiative() again + debug!("Relayer: initiative from recv_timeout"); + drained = true; + directive + } + Err(RecvTimeoutError::Timeout) => { + break; + } + Err(RecvTimeoutError::Disconnected) => { + disconnect = true; + break; + } } - } - }; + }; - debug!("Relayer: main loop directive"; - "directive" => %directive, - "raised_initiative" => %raised_initiative.unwrap_or("relay_rcv".to_string()), - "timed_out" => %timed_out); + debug!("Relayer: main loop directive"; + "try_recv" => %try_recv, + "drained" => %drained, + 
"directive" => %directive, + "raised_initiative" => %raised_initiative_fmt, + "timed_out" => %timed_out); - if !self.handle_directive(directive) { + if !self.handle_directive(directive) { + handled = false; + break; + } + } + debug!("Relayer: drained channel"); + if disconnect || !handled { + info!("Exiting relayer main loop"); break; } } From 61d701bce3e6ce477ed2bd32871f22b09e6994fd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 28 Oct 2024 17:48:07 -0400 Subject: [PATCH 014/109] chore: only consider block-bearing network results if in ibd mode or if there's download pressure --- stackslib/src/net/mod.rs | 4 ++++ testnet/stacks-node/src/nakamoto_node/peer.rs | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2210160bee..898f91c11b 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1616,6 +1616,10 @@ impl NetworkResult { || self.has_stackerdb_chunks() } + pub fn has_block_data_to_store(&self) -> bool { + self.has_blocks() || self.has_microblocks() || self.has_nakamoto_blocks() + } + pub fn consume_unsolicited(&mut self, unhandled_messages: PendingMessages) { for ((_event_id, neighbor_key), messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 004023ea26..607ef00197 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -307,7 +307,8 @@ impl PeerThread { have_update = true; } - if network_result.has_data_to_store() + if ((ibd || download_backpressure) && network_result.has_block_data_to_store()) + || (!ibd && network_result.has_data_to_store()) || self.last_burn_block_height != network_result.burn_height || have_update { From e8cb18f96d315624d5e84148ef1dd37dcdabbba9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 28 Oct 2024 17:52:20 -0400 Subject: [PATCH 
015/109] chore: shed network results when in ibd or with download backpressure so that we only forward results that contain blocks (drop tx and stackerdb messages) --- testnet/stacks-node/src/nakamoto_node/peer.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 607ef00197..9ae5edfe99 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -318,6 +318,16 @@ impl PeerThread { self.results_with_data .push_back(RelayerDirective::HandleNetResult(network_result)); } + + if ibd || download_backpressure { + // if we have backpressure or we're in ibd, then only keep network results that tell us + // block data or information about download and inv passes + self.results_with_data.retain(|netres| { + netres.has_block_data_to_store() + || self.last_burn_block_height != network_result.burn_height + || have_update + }) + } } Err(e) => { // this is only reachable if the network is not instantiated correctly -- From be968887651d4b8dbaa98ef930159febfa8758fd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 28 Oct 2024 18:00:01 -0400 Subject: [PATCH 016/109] chore: fix compile issues --- testnet/stacks-node/src/nakamoto_node/peer.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 9ae5edfe99..c622666765 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -322,10 +322,11 @@ impl PeerThread { if ibd || download_backpressure { // if we have backpressure or we're in ibd, then only keep network results that tell us // block data or information about download and inv passes - self.results_with_data.retain(|netres| { - netres.has_block_data_to_store() - || self.last_burn_block_height != network_result.burn_height - || have_update + 
self.results_with_data.retain(|netres| match netres { + RelayerDirective::HandleNetResult(netres) => { + netres.has_block_data_to_store() + } + _ => true, }) } } From c5ec5b3aea3406c499e9ab4310f84ca2813af410 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 28 Oct 2024 19:16:29 -0400 Subject: [PATCH 017/109] fix: drive main loop wakeups when we're backlogged --- testnet/stacks-node/src/nakamoto_node/peer.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index c622666765..4e208a88cc 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -317,6 +317,10 @@ impl PeerThread { self.last_burn_block_height = network_result.burn_height; self.results_with_data .push_back(RelayerDirective::HandleNetResult(network_result)); + + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } if ibd || download_backpressure { @@ -340,6 +344,9 @@ impl PeerThread { while let Some(next_result) = self.results_with_data.pop_front() { // have blocks, microblocks, and/or transactions (don't care about anything else), // or a directive to mine microblocks + self.globals.raise_initiative( + "PeerThread::run_one_pass() with backlogged network results".to_string(), + ); if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( "P2P: {:?}: download backpressure detected (bufferred {})", @@ -363,9 +370,6 @@ impl PeerThread { "P2P: Dispatched result to Relayer! 
{} results remaining", self.results_with_data.len() ); - self.globals.raise_initiative( - "PeerThread::run_one_pass() with data-bearing network result".to_string(), - ); } } From 41c036918a0fcb7b6a7187f6747c8f1ba01c9c62 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 28 Oct 2024 22:43:13 -0700 Subject: [PATCH 018/109] Do not attempt to process a block validation response for an already globally processed block Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2cb10a9817..65d764baa8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -563,7 +563,16 @@ impl Signer { .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } else { + block_info + } + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); From 3d67fcbca55e658c845ab5b363912c2c56937f26 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 29 Oct 2024 08:02:59 -0700 Subject: [PATCH 019/109] Remove unnecesssary elses in handle_block_validate_* Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 65d764baa8..8dc73fcef7 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -520,9 +520,8 @@ impl Signer { { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? @@ -569,9 +568,8 @@ impl Signer { { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? 
From d734345c75cc33a4d809b34f1a245914a11d9074 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:25:38 -0400 Subject: [PATCH 020/109] feat: add pause after block rejections Fixes: #5405 --- testnet/stacks-node/src/config.rs | 14 +++++++++++++- testnet/stacks-node/src/nakamoto_node/miner.rs | 14 +++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0beed9471d..b6517eadbe 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,9 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -2183,6 +2185,10 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, + /// Time in milliseconds to pause after receiving the first rejection, before proposing a new block. + pub first_rejection_pause_ms: u64, + /// Time in milliseconds to pause after receiving the subsequent rejections, before proposing a new block. 
+ pub subsequent_rejection_pause_ms: u64, } impl Default for MinerConfig { @@ -2213,6 +2219,8 @@ impl Default for MinerConfig { max_reorg_depth: 3, pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, + first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, + subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, } } } @@ -2575,6 +2583,8 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub pre_nakamoto_mock_signing: Option, pub min_time_between_blocks_ms: Option, + pub first_rejection_pause_ms: Option, + pub subsequent_rejection_pause_ms: Option, } impl MinerConfigFile { @@ -2688,6 +2698,8 @@ impl MinerConfigFile { } else { ms }).unwrap_or(miner_default_config.min_time_between_blocks_ms), + first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), + subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a08c0ab353..0caf0a7088 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -283,6 +283,7 @@ impl BlockMinerThread { } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; + let mut last_block_rejected = false; // now, actually run this tenure loop { @@ -386,15 +387,26 @@ impl BlockMinerThread { return Err(e); } _ => { - error!("Error while gathering signatures: {e:?}. 
Will try mining again."; + // Sleep for a bit to allow signers to catch up + let pause_ms = if last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; + + error!("Error while gathering signatures: {e:?}. Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); + continue; } }, }; + last_block_rejected = false; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { From 6e0eca60969f0029f67dc259790dbbb30391ad22 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:34:27 -0400 Subject: [PATCH 021/109] chore: move log before sleep --- testnet/stacks-node/src/nakamoto_node/miner.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 0caf0a7088..150762e965 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -393,15 +393,14 @@ impl BlockMinerThread { } else { self.config.miner.first_rejection_pause_ms }; - thread::sleep(Duration::from_millis(pause_ms)); - last_block_rejected = true; error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; continue; } }, From 62470519f5d1ba99f0034030214e4089e04694d2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 30 Oct 2024 12:36:03 -0400 Subject: [PATCH 022/109] chore: improve comments --- testnet/stacks-node/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index b6517eadbe..5df5de28f2 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2185,9 +2185,9 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, - /// Time in milliseconds to pause after receiving the first rejection, before proposing a new block. + /// Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block. pub first_rejection_pause_ms: u64, - /// Time in milliseconds to pause after receiving the subsequent rejections, before proposing a new block. + /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. 
pub subsequent_rejection_pause_ms: u64, } From 0685670c55ddb3ec9c7064087e6c4ff00f10aeab Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Wed, 30 Oct 2024 13:43:44 -0400 Subject: [PATCH 023/109] chore: option to disable block pushes --- stackslib/src/net/connection.rs | 3 +++ stackslib/src/net/download/nakamoto/tenure.rs | 6 ++++++ .../net/download/nakamoto/tenure_downloader_set.rs | 8 +++++--- stackslib/src/net/relay.rs | 12 ++++++++++++ testnet/stacks-node/src/config.rs | 4 ++++ 5 files changed, 30 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 85fe9d7494..4eeec0daaf 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -474,6 +474,8 @@ pub struct ConnectionOptions { /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. pub force_nakamoto_epoch_transition: bool, + /// Reject blocks that were pushed + pub reject_blocks_pushed: bool, // test facilitation /// Do not require that an unsolicited message originate from an authenticated, connected @@ -583,6 +585,7 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_sync: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, + reject_blocks_pushed: false, // no test facilitations on by default test_disable_unsolicited_message_authentication: false, diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 53f9105156..ba1ac81033 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -98,6 +98,8 @@ impl WantedTenure { pub struct TenureStartEnd { /// Consensus hash that identifies the start of the tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Burnchain block height of tenure ID consensus hash + pub tenure_id_burn_block_height: u64, /// Tenure-start block ID pub start_block_id: 
StacksBlockId, /// Last block ID @@ -119,6 +121,7 @@ pub type AvailableTenures = HashMap; impl TenureStartEnd { pub fn new( tenure_id_consensus_hash: ConsensusHash, + tenure_id_burn_block_height: u64, start_block_id: StacksBlockId, end_block_id: StacksBlockId, start_reward_cycle: u64, @@ -127,6 +130,7 @@ impl TenureStartEnd { ) -> Self { Self { tenure_id_consensus_hash, + tenure_id_burn_block_height, start_block_id, end_block_id, start_reward_cycle, @@ -214,6 +218,7 @@ impl TenureStartEnd { let tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, @@ -322,6 +327,7 @@ impl TenureStartEnd { let mut tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 49b32c2634..8fa845cfab 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -411,15 +411,17 @@ impl NakamotoTenureDownloaderSet { "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, - "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); + "tenure_end_reward_cycle" => tenure_info.end_reward_cycle, + "tenure_burn_height" => tenure_info.tenure_id_burn_block_height); debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", + "Download tenure {} (start={}, end={}) (rc {},{}) burn_height {}", &ch, &tenure_info.start_block_id, &tenure_info.end_block_id, tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle + tenure_info.end_reward_cycle, + tenure_info.tenure_id_burn_block_height, ); let tenure_download = NakamotoTenureDownloader::new( 
ch.clone(), diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7e4ecbb408..f923aa1281 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1703,6 +1703,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, + reject_blocks_pushed: bool, ) -> Result<(Vec, Vec), net_error> { let mut pushed_blocks = vec![]; let mut bad_neighbors = vec![]; @@ -1731,6 +1732,14 @@ impl Relayer { for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) { let block_id = nakamoto_block.block_id(); + if reject_blocks_pushed { + debug!( + "Received pushed Nakamoto block {} from {}, but configured to reject it.", + block_id, neighbor_key + ); + continue; + } + debug!( "Received pushed Nakamoto block {} from {}", block_id, neighbor_key @@ -2092,6 +2101,7 @@ impl Relayer { /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that /// sent us invalid blocks. pub fn process_new_nakamoto_blocks( + connection_opts: &ConnectionOptions, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2128,6 +2138,7 @@ impl Relayer { sortdb, chainstate, coord_comms, + connection_opts.reject_blocks_pushed, ) { Ok(x) => x, Err(e) => { @@ -2848,6 +2859,7 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, ) -> u64 { let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( + &self.connection_opts, network_result, burnchain, sortdb, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index b9c3197541..eb9db50210 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2264,6 +2264,7 @@ pub struct ConnectionOptionsFile { pub private_neighbors: Option, pub auth_token: Option, pub antientropy_retry: Option, + pub reject_blocks_pushed: Option, } impl ConnectionOptionsFile { @@ -2396,6 +2397,9 @@ impl ConnectionOptionsFile { 
private_neighbors: self.private_neighbors.unwrap_or(true), auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), + reject_blocks_pushed: self + .reject_blocks_pushed + .unwrap_or(default.reject_blocks_pushed), ..default }) } From eeab742c3228a544570de0f49500afa98eefd4d8 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 30 Oct 2024 13:27:06 -0700 Subject: [PATCH 024/109] Change block rejection message to generic block response Signed-off-by: Jacinta Ferrant --- stacks-signer/src/v0/signer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e60428be6e..36f49923c3 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -348,7 +348,7 @@ impl Signer { crate::monitoring::increment_block_responses_sent(accepted); } Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + warn!("{self}: Failed to send block response to stacker-db: {e:?}",); } } return; From f23ffb6bd10cdfd0a169221b604c925ff1724221 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 30 Oct 2024 13:40:30 -0700 Subject: [PATCH 025/109] Add a block_proposal_validation_timeout_ms config option Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 6fc7c7b2dd..052817302f 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,6 +35,7 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; +const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; #[derive(thiserror::Error, Debug)] @@ -158,6 +159,9 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option, 
+ /// How long to wait for a response from a block proposal validation response from the node + /// before marking that block as invalid and rejecting it + pub block_proposal_validation_timeout: Duration, } /// Internal struct for loading up the config file @@ -187,6 +191,9 @@ struct RawConfigFile { pub block_proposal_timeout_ms: Option, /// An optional custom Chain ID pub chain_id: Option, + /// How long to wait for a response from a block proposal validation response from the node + /// before marking that block as invalid and rejecting it in milliseconds. + pub block_proposal_validation_timeout_ms: Option, } impl RawConfigFile { @@ -266,6 +273,11 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), ); + let block_proposal_validation_timeout = Duration::from_millis( + raw_data + .block_proposal_validation_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), + ); Ok(Self { node_host: raw_data.node_host, endpoint, @@ -279,6 +291,7 @@ impl TryFrom for GlobalConfig { first_proposal_burn_block_timing, block_proposal_timeout, chain_id: raw_data.chain_id, + block_proposal_validation_timeout, }) } } From 9161e04de698c7978dd494f7b14b37fb4ddc9d14 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 30 Oct 2024 16:02:11 -0700 Subject: [PATCH 026/109] Track the last submitted block proposal and do not submit a new one if we already are processing one Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 3 + stacks-signer/src/client/mod.rs | 1 + stacks-signer/src/config.rs | 2 + stacks-signer/src/runloop.rs | 1 + stacks-signer/src/tests/chainstate.rs | 1 + stacks-signer/src/v0/signer.rs | 157 +++++++++++++++++- .../src/tests/nakamoto_integrations.rs | 3 + testnet/stacks-node/src/tests/signer/v0.rs | 1 + 8 files changed, 161 insertions(+), 8 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 44ae11b252..926dee312a 100644 --- a/stacks-signer/src/chainstate.rs +++ 
b/stacks-signer/src/chainstate.rs @@ -119,6 +119,8 @@ pub struct ProposalEvalConfig { pub first_proposal_burn_block_timing: Duration, /// Time between processing a sortition and proposing a block before the block is considered invalid pub block_proposal_timeout: Duration, + /// How long to wait for a block proposal validation response + pub block_proposal_validation_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -126,6 +128,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, + block_proposal_validation_timeout: value.block_proposal_validation_timeout, } } } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 9885182d98..7e1e388e92 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -411,6 +411,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + block_proposal_validation_timeout: config.block_proposal_validation_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 052817302f..ec966539ca 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -129,6 +129,8 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// How much time ot wait for a block proposal validation response before marking the block invalid + pub block_proposal_validation_timeout: Duration, } /// The parsed configuration for the signer diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index a0e2b739e9..de77a2a0d4 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -283,6 +283,7 @@ impl, 
T: StacksMessageCodec + Clone + Send + Debug> RunLo mainnet: self.config.network.is_mainnet(), db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, + block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, })) } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 886480f063..59cd063c9e 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -89,6 +89,7 @@ fn setup_test_environment( config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), + block_proposal_validation_timeout: Duration::from_secs(60), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index db84efbd7d..200db378b9 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::Instant; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -85,6 +86,8 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, + /// The current submitted block proposal and its submission time + pub submitted_block_proposal: Option<(BlockProposal, Instant)>, } impl std::fmt::Display for Signer { @@ -127,6 +130,7 @@ impl SignerTrait for Signer { if event_parity == Some(other_signer_parity) { return; } + self.check_submitted_block_proposal(); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. 
@@ -274,6 +278,7 @@ impl From for Signer { reward_cycle: signer_config.reward_cycle, signer_db, proposal_config, + submitted_block_proposal: None, } } } @@ -355,7 +360,7 @@ impl Signer { } info!( - "{self}: received a block proposal for a new block. Submit block for validation. "; + "{self}: received a block proposal for a new block."; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, @@ -456,14 +461,35 @@ impl Signer { Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), } } else { - // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. - // Do not store invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); + // Just in case check if the last block validation submission timed out. + self.check_submitted_block_proposal(); + if self.submitted_block_proposal.is_none() { + // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. + info!( + "{self}: submitting block proposal for validation"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + ); + match stacks_client.submit_block_for_validation(block_info.block.clone()) { + Ok(_) => { + self.submitted_block_proposal = + Some((block_proposal.clone(), Instant::now())); + } + Err(e) => { + warn!("{self}: Failed to submit block for validation: {e:?}"); + } + }; + } else { + // Still store the block but log we can't submit it for validation. 
We may receive enough signatures/rejections + // from other signers to push the proposed block into a global rejection/acceptance regardless of our participation. + // However, we will not be able to participate beyond this until our block submission times out or we receive a response + // from our node. + warn!("{self}: cannot submit block proposal for validation as we are already waiting for a response for a prior submission") + } + // Do not store KNOWN invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); @@ -493,6 +519,16 @@ impl Signer { ) -> Option { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self .signer_db @@ -542,6 +578,16 @@ impl Signer { ) -> Option { crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } let mut block_info = match self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) @@ -617,6 +663,85 @@ impl Signer { } } + /// Check the current tracked submitted block proposal to see if it has timed out. + /// Broadcasts a rejection and marks the block locally rejected if it has. 
+ fn check_submitted_block_proposal(&mut self) { + let Some((block_proposal, block_submission)) = self.submitted_block_proposal.clone() else { + // Nothing to check. + return; + }; + if block_submission.elapsed() < self.proposal_config.block_proposal_validation_timeout { + // Not expired yet. + return; + } + // Let us immediately flush, even if we later encounter an error broadcasting our responses. + // We should still attempt to handle a new proposal at this point. + self.submitted_block_proposal = None; + let signature_sighash = block_proposal.block.header.signer_signature_hash(); + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signature_sighash) + { + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + // The block has already reached consensus. This should never really hit, but check just to be safe. + self.submitted_block_proposal = None; + return; + } + block_info + } + Ok(None) => { + // This is weird. If this is reached, its probably an error in code logic or the db was flushed. + // Why are we tracking a block submission for a block we have never seen / stored before. + error!("{self}: tracking an unknown block validation submission."; + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + self.submitted_block_proposal = None; + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + self.submitted_block_proposal = None; + return; + } + }; + warn!( + "{self}: Failed to receive block validation response within {} ms. 
Rejecting block.", self.proposal_config.block_proposal_validation_timeout.as_millis(); + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + let rejection = BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ConnectivityIssues, + &self.private_key, + self.mainnet, + ); + // We know proposal is invalid. Send rejection message, do not do further validation + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}"); + let res = self + .stackerdb + .send_message_with_retry::(rejection.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } + /// Compute the signing weight, given a list of signatures fn compute_signature_signing_weight<'a>( &self, @@ -723,6 +848,14 @@ impl Signer { error!("{self}: Failed to update block state: {e:?}",); panic!("{self} Failed to update block state: {e}"); } + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } } /// Handle an observed signature from another signer @@ -865,6 +998,14 @@ impl Signer { } } self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } } fn 
broadcast_signed_block( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b5140a06ee..6041c8ca28 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6450,6 +6450,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + block_proposal_validation_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6588,6 +6589,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + block_proposal_validation_timeout: Duration::from_secs(100), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6665,6 +6667,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + block_proposal_validation_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684a..18c52370c2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -456,6 +456,7 @@ fn block_proposal_rejection() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + block_proposal_validation_timeout: Duration::from_secs(100), }; let mut block = NakamotoBlock { header: 
NakamotoBlockHeader::empty(), From 44197c87f5459c7ebdf3cbdfba8ce115bd7f3f41 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 30 Oct 2024 17:15:19 -0700 Subject: [PATCH 027/109] WIP: broken test initial braindump Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/chainstate.rs | 3 - stacks-signer/src/tests/chainstate.rs | 1 - stacks-signer/src/v0/signer.rs | 10 +- .../src/tests/nakamoto_integrations.rs | 3 - testnet/stacks-node/src/tests/signer/v0.rs | 150 +++++++++++++++++- 6 files changed, 157 insertions(+), 11 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..fd7c4f1df4 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -120,6 +120,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::multiple_miners_with_custom_chain_id + - tests::signer::v0::block_validation_response_timeout - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 926dee312a..44ae11b252 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -119,8 +119,6 @@ pub struct ProposalEvalConfig { pub first_proposal_burn_block_timing: Duration, /// Time between processing a sortition and proposing a block before the block is considered invalid pub block_proposal_timeout: Duration, - /// How long to wait for a block proposal validation response - pub block_proposal_validation_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -128,7 +126,6 @@ impl From<&SignerConfig> for ProposalEvalConfig { Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: 
value.block_proposal_timeout, - block_proposal_validation_timeout: value.block_proposal_validation_timeout, } } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 59cd063c9e..886480f063 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -89,7 +89,6 @@ fn setup_test_environment( config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), - block_proposal_validation_timeout: Duration::from_secs(60), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 200db378b9..95769eed57 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,7 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; -use std::time::Instant; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -86,6 +86,9 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, + /// How long to wait for a block proposal validation response to arrive before + /// marking a submitted block as invalid + pub block_proposal_validation_timeout: Duration, /// The current submitted block proposal and its submission time pub submitted_block_proposal: Option<(BlockProposal, Instant)>, } @@ -279,6 +282,7 @@ impl From for Signer { signer_db, proposal_config, submitted_block_proposal: None, + block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, } } } @@ -670,7 +674,7 @@ impl Signer { // Nothing to check. return; }; - if block_submission.elapsed() < self.proposal_config.block_proposal_validation_timeout { + if block_submission.elapsed() < self.block_proposal_validation_timeout { // Not expired yet. 
return; } @@ -710,7 +714,7 @@ impl Signer { } }; warn!( - "{self}: Failed to receive block validation response within {} ms. Rejecting block.", self.proposal_config.block_proposal_validation_timeout.as_millis(); + "{self}: Failed to receive block validation response within {} ms. Rejecting block.", self.block_proposal_validation_timeout.as_millis(); "signer_sighash" => %signature_sighash, "block_id" => %block_proposal.block.block_id(), ); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6041c8ca28..b5140a06ee 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6450,7 +6450,6 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), - block_proposal_validation_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6589,7 +6588,6 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), - block_proposal_validation_timeout: Duration::from_secs(100), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6667,7 +6665,6 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), - block_proposal_validation_timeout: Duration::from_secs(100), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 
18c52370c2..87317a78f6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -456,7 +456,6 @@ fn block_proposal_rejection() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), - block_proposal_validation_timeout: Duration::from_secs(100), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -5608,3 +5607,152 @@ fn multiple_miners_with_custom_chain_id() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +// Timeout a block validation response +#[test] +#[ignore] +fn block_validation_response_timeout() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let timeout = Duration::from_secs(60); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + |config| { + config.block_proposal_validation_timeout = timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + info!("------------------------- Test Block Validation Stalled -------------------------"); + TEST_VALIDATE_STALL.lock().unwrap().replace(true); + let validation_stall_start = Instant::now(); + + let 
proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + // submit a tx so that the miner will attempt to mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(30, || { + Ok(signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst) + > proposals_before) + }) + .expect("Timed out waiting for block proposal"); + + info!("------------------------- Propose Another Block -------------------------"); + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + block_proposal_timeout: Duration::from_secs(100), + }; + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + + // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = 1; // We have mined 1 block so far + + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + let info_before = get_chain_info(&signer_test.running_nodes.conf); + signer_test.propose_block(block, Duration::from_secs(30)); + + info!("------------------------- Waiting for Timeout -------------------------"); + // Sleep the necessary timeout to make sure the validation times out. 
+ let elapsed = validation_stall_start.elapsed(); + std::thread::sleep(timeout.saturating_sub(elapsed)); + + info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); + // Verify the signers rejected the first block due to timeout + let start = Instant::now(); + let mut rejected_signers = vec![]; + let mut saw_connectivity_complaint = false; + while rejected_signers.len() < num_signers { + std::thread::sleep(Duration::from_secs(1)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + signature, + .. + })) = message + { + if signer_signature_hash == block_signer_signature_hash_1 { + rejected_signers.push(signature); + if matches!(reason_code, RejectCode::ConnectivityIssues) { + saw_connectivity_complaint = true; + } + } + } + } + assert!( + start.elapsed() <= Duration::from_secs(10), + "Timed out after waiting for response from signer" + ); + } + + assert!( + saw_connectivity_complaint, + "We did not see the expected connectity rejection reason" + ); + // Make sure our chain has still not advanced + let info_after = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(info_before, info_after); + + info!("Unpausing block validation"); + // Disable the stall and wait for the block to be processed + TEST_VALIDATE_STALL.lock().unwrap().replace(false); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + assert_eq!( + get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height, + info_before.stacks_tip_height + 1, + ); +} From 
babd3d910862741da3db4c2be0e797cf38d1d492 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 10:50:32 -0400 Subject: [PATCH 028/109] feat: make NetworkResults mergeable --- stackslib/src/net/mod.rs | 536 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 534 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 898f91c11b..41cdb16801 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -16,7 +16,6 @@ #[warn(unused_imports)] use std::collections::HashMap; -#[cfg(any(test, feature = "testing"))] use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::io::prelude::*; @@ -1466,7 +1465,7 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct NetworkResult { /// Stacks chain tip when we began this pass pub stacks_tip: StacksBlockId, @@ -1563,6 +1562,539 @@ impl NetworkResult { } } + /// Get the set of all StacksBlocks represented + fn all_block_ids(&self) -> HashSet { + let mut blocks: HashSet<_> = self + .blocks + .iter() + .map(|(ch, blk, _)| StacksBlockId::new(&ch, &blk.block_hash())) + .collect(); + + let pushed_blocks: HashSet<_> = self + .pushed_blocks + .iter() + .map(|(_, block_list)| { + block_list + .iter() + .map(|block_data| { + block_data + .blocks + .iter() + .map(|block_datum| { + StacksBlockId::new(&block_datum.0, &block_datum.1.block_hash()) + }) + .collect::>() + }) + .collect::>>() + }) + .collect::>>>() + .into_iter() + .flatten() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let uploaded_blocks: HashSet<_> = self + .uploaded_blocks + .iter() + .map(|blk_data| { + blk_data + .blocks + .iter() + .map(|blk| StacksBlockId::new(&blk.0, &blk.1.block_hash())) + .collect::>() + }) + .collect::>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + 
acc.extend(next.into_iter()); + acc + }); + + blocks.extend(pushed_blocks.into_iter()); + blocks.extend(uploaded_blocks.into_iter()); + blocks + } + + /// Get the set of all microblocks represented + fn all_microblock_hashes(&self) -> HashSet { + let mut mblocks: HashSet<_> = self + .confirmed_microblocks + .iter() + .map(|(_, mblocks, _)| { + mblocks + .iter() + .map(|mblk| mblk.block_hash()) + .collect::>() + }) + .collect::>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let pushed_microblocks: HashSet<_> = self + .pushed_microblocks + .iter() + .map(|(_, mblock_list)| { + mblock_list + .iter() + .map(|(_, mblock_data)| { + mblock_data + .microblocks + .iter() + .map(|mblock| mblock.block_hash()) + .collect::>() + }) + .collect::>>() + }) + .collect::>>>() + .into_iter() + .flatten() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let uploaded_microblocks: HashSet<_> = self + .uploaded_microblocks + .iter() + .map(|mblk_data| { + mblk_data + .microblocks + .iter() + .map(|mblk| mblk.block_hash()) + .collect::>() + }) + .collect::>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + mblocks.extend(pushed_microblocks.into_iter()); + mblocks.extend(uploaded_microblocks.into_iter()); + mblocks + } + + /// Get the set of all nakamoto blocks represented + fn all_nakamoto_block_ids(&self) -> HashSet { + let mut naka_block_ids: HashSet<_> = self + .nakamoto_blocks + .iter() + .map(|(_, nblk)| nblk.block_id()) + .collect(); + + let pushed_nakamoto_blocks: HashSet<_> = self + .pushed_nakamoto_blocks + .iter() + .map(|(_, naka_blocks_list)| { + naka_blocks_list + .iter() + .map(|(_, naka_blocks)| { + naka_blocks + .blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect::>() + }) + .collect::>>() + }) + .collect::>>>() + .into_iter() + .flatten() + .into_iter() + .fold(HashSet::new(), |mut acc, 
next| { + acc.extend(next.into_iter()); + acc + }); + + let uploaded_nakamoto_blocks: HashSet<_> = self + .uploaded_nakamoto_blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect(); + + naka_block_ids.extend(pushed_nakamoto_blocks.into_iter()); + naka_block_ids.extend(uploaded_nakamoto_blocks.into_iter()); + naka_block_ids + } + + /// Get the set of all txids represented + fn all_txids(&self) -> HashSet { + let mut txids: HashSet<_> = self + .uploaded_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + let pushed_txids: HashSet<_> = self + .pushed_transactions + .iter() + .map(|(_, tx_list)| { + tx_list + .iter() + .map(|(_, tx)| tx.txid()) + .collect::>() + }) + .collect::>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let synced_txids: HashSet<_> = self + .synced_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + + txids.extend(pushed_txids.into_iter()); + txids.extend(synced_txids.into_iter()); + txids + } + + /// Get all unhandled message signatures. + /// This is unique per message. + fn all_msg_sigs(&self) -> HashSet { + self.unhandled_messages + .iter() + .map(|(_, msgs)| { + msgs.iter() + .map(|msg| msg.preamble.signature.clone()) + .collect::>() + }) + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }) + } + + /// Merge self into `newer`, and return `newer`. + /// deduplicate messages when possible. 
+ pub fn update(mut self, mut newer: NetworkResult) -> Self { + // merge unhandled messages, but deduplicate + let newer_msgs = newer.all_msg_sigs(); + for (nk, mut msgs) in self.unhandled_messages.drain() { + msgs.retain(|msg| { + let retain = !newer_msgs.contains(&msg.preamble.signature); + if !retain { + debug!( + "Drop duplicate p2p message {} seq {}", + &msg.get_message_name(), + &msg.preamble.seq + ); + } + retain + }); + if let Some(newer_msgs) = newer.unhandled_messages.get_mut(&nk) { + newer_msgs.append(&mut msgs); + } else { + newer.unhandled_messages.insert(nk, msgs); + } + } + + let newer_blocks = newer.all_block_ids(); + let newer_microblocks = newer.all_microblock_hashes(); + let newer_naka_blocks = newer.all_nakamoto_block_ids(); + let newer_txids = newer.all_txids(); + + // only retain blocks not found in `newer` + self.blocks.retain(|(ch, blk, _)| { + let block_id = StacksBlockId::new(&ch, &blk.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate downloaded block {}", &block_id); + } + retain + }); + newer.blocks.append(&mut self.blocks); + + // merge microblocks, but deduplicate + self.confirmed_microblocks + .retain_mut(|(_, ref mut mblocks, _)| { + mblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!( + "Drop duplicate downloaded microblock {}", + &mblk.block_hash() + ); + } + retain + }); + mblocks.len() > 0 + }); + newer + .confirmed_microblocks + .append(&mut self.confirmed_microblocks); + + // merge nakamoto blocks, but deduplicate + self.nakamoto_blocks.retain(|_, nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate downloaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + newer.nakamoto_blocks.extend(self.nakamoto_blocks.drain()); + + // merge pushed transactions, but deduplicate + for (nk, mut tx_data) in self.pushed_transactions.drain() { + 
tx_data.retain(|(_, tx)| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate pushed transaction {}", &tx.txid()); + } + retain + }); + if tx_data.len() == 0 { + continue; + } + + if let Some(newer_tx_data) = newer.pushed_transactions.get_mut(&nk) { + newer_tx_data.append(&mut tx_data); + } else { + newer.pushed_transactions.insert(nk, tx_data); + } + } + + // merge pushed blocks, but deduplicate + for (nk, mut block_list) in self.pushed_blocks.drain() { + block_list.retain_mut(|ref mut block_data| { + block_data.blocks.retain(|blk_datum| { + let block_id = StacksBlockId::new(&blk_datum.0, &blk_datum.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate pushed block {}", &block_id); + } + retain + }); + block_data.blocks.len() > 0 + }); + if block_list.len() == 0 { + continue; + } + + if let Some(newer_block_data) = newer.pushed_blocks.get_mut(&nk) { + newer_block_data.append(&mut block_list); + } else { + newer.pushed_blocks.insert(nk, block_list); + } + } + + // merge pushed microblocks, but deduplicate + for (nk, mut microblock_data) in self.pushed_microblocks.drain() { + microblock_data.retain_mut(|(_, ref mut mblock_data)| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate pushed microblock {}", &mblk.block_hash()); + } + retain + }); + mblock_data.microblocks.len() > 0 + }); + if microblock_data.len() == 0 { + continue; + } + + if let Some(newer_microblock_data) = newer.pushed_microblocks.get_mut(&nk) { + newer_microblock_data.append(&mut microblock_data); + } else { + newer.pushed_microblocks.insert(nk, microblock_data); + } + } + + // merge pushed nakamoto blocks, but deduplicate + for (nk, mut nakamoto_block_data) in self.pushed_nakamoto_blocks.drain() { + nakamoto_block_data.retain_mut(|(_, ref mut naka_blocks)| { + naka_blocks.blocks.retain(|nblk| { + let retain = 
!newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!("Drop duplicate pushed nakamoto block {}", &nblk.block_id()); + } + retain + }); + naka_blocks.blocks.len() > 0 + }); + if nakamoto_block_data.len() == 0 { + continue; + } + + if let Some(newer_nakamoto_data) = newer.pushed_nakamoto_blocks.get_mut(&nk) { + newer_nakamoto_data.append(&mut nakamoto_block_data); + } else { + newer.pushed_nakamoto_blocks.insert(nk, nakamoto_block_data); + } + } + + // merge uploaded data, but deduplicate + self.uploaded_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate uploaded transaction {}", &tx.txid()); + } + retain + }); + self.uploaded_blocks.retain_mut(|ref mut blk_data| { + blk_data.blocks.retain(|blk| { + let block_id = StacksBlockId::new(&blk.0, &blk.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate uploaded block {}", &block_id); + } + retain + }); + + blk_data.blocks.len() > 0 + }); + self.uploaded_microblocks.retain_mut(|ref mut mblock_data| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate uploaded microblock {}", &mblk.block_hash()); + } + retain + }); + + mblock_data.microblocks.len() > 0 + }); + self.uploaded_nakamoto_blocks.retain(|nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate uploaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + + newer + .uploaded_transactions + .append(&mut self.uploaded_transactions); + newer.uploaded_blocks.append(&mut self.uploaded_blocks); + newer + .uploaded_microblocks + .append(&mut self.uploaded_microblocks); + newer + .uploaded_nakamoto_blocks + .append(&mut self.uploaded_nakamoto_blocks); + + // merge uploaded/pushed stackerdb, but drop stale versions + let newer_stackerdb_chunk_versions: HashMap<_, _> = newer + 
.uploaded_stackerdb_chunks + .iter() + .chain(newer.pushed_stackerdb_chunks.iter()) + .map(|chunk| { + ( + ( + chunk.contract_id.clone(), + chunk.rc_consensus_hash.clone(), + chunk.chunk_data.slot_id, + ), + chunk.chunk_data.slot_version, + ) + }) + .collect(); + + self.uploaded_stackerdb_chunks.retain(|push_chunk| { + if push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop pushed StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop pushed StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + self.pushed_stackerdb_chunks.retain(|push_chunk| { + if push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + newer + .uploaded_stackerdb_chunks + .append(&mut self.uploaded_stackerdb_chunks); + newer + .pushed_stackerdb_chunks + .append(&mut self.pushed_stackerdb_chunks); + + // dedup sync'ed 
transactions + self.synced_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate sync'ed transaction {}", &tx.txid()); + } + retain + }); + + newer + .synced_transactions + .append(&mut self.synced_transactions); + + // no dedup here, but do merge + newer + .stacker_db_sync_results + .append(&mut self.stacker_db_sync_results); + newer.attachments.append(&mut self.attachments); + + newer + } + pub fn has_blocks(&self) -> bool { self.blocks.len() > 0 || self.pushed_blocks.len() > 0 } From 596d41da1ca2f842867a7241615c83ad7d30a9ae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 10:51:01 -0400 Subject: [PATCH 029/109] chore: make StackerDBSyncResult Debug and PartialEq --- stackslib/src/net/stackerdb/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 57d1a427dc..bbbec21290 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -155,7 +155,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, From 1fc9d72afc7b8a1d46d177a200c5d92cd3d3c8fb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 10:51:19 -0400 Subject: [PATCH 030/109] chore: test NetworkResult::update() --- stackslib/src/net/tests/mod.rs | 681 ++++++++++++++++++++++++++++++++- 1 file changed, 677 insertions(+), 4 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6e61e7e610..11310ecd52 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -22,18 +22,24 @@ pub mod mempool; pub mod neighbors; pub mod relay; -use 
std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::PrincipalData; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use libstackerdb::StackerDBChunkData; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::bitvec::BitVec; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{ - StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, + StacksPublicKey, TrieHash, }; +use stacks_common::types::net::PeerAddress; use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFProof; use crate::burnchains::PoxConstants; @@ -45,7 +51,7 @@ use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_pox_4_lockup_chain_id, make_signer_key_signature, @@ -54,8 +60,10 @@ use crate::chainstate::stacks::boot::test::{ use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; +use crate::chainstate::stacks::db::blocks::test::make_empty_coinbase_block; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, 
StacksChainState}; use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::test::make_codec_test_microblock; use crate::chainstate::stacks::{ CoinbasePayload, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, @@ -66,6 +74,10 @@ use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; +use crate::net::{ + BlocksData, BlocksDatum, MicroblocksData, NakamotoBlocksData, NeighborKey, NetworkResult, + PingData, StackerDBPushChunkData, StacksMessage, StacksMessageType, +}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; @@ -1125,3 +1137,664 @@ fn test_boot_nakamoto_peer() { let observer = TestEventObserver::new(); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); } + +#[test] +fn test_network_result_update() { + let mut network_result_1 = NetworkResult::new( + StacksBlockId([0x11; 32]), + 1, + 1, + 1, + 1, + 1, + ConsensusHash([0x11; 20]), + HashMap::new(), + ); + + let mut network_result_2 = NetworkResult::new( + StacksBlockId([0x22; 32]), + 2, + 2, + 2, + 2, + 2, + ConsensusHash([0x22; 20]), + HashMap::new(), + ); + + let nk1 = NeighborKey { + peer_version: 1, + network_id: 1, + addrbytes: PeerAddress([0x11; 16]), + port: 1, + }; + + let nk2 = NeighborKey { + peer_version: 2, + network_id: 2, + addrbytes: PeerAddress([0x22; 16]), + port: 2, + }; + + let msg1 = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x11; 32]), + 1, + &BurnchainHeaderHash([0x11; 32]), + StacksMessageType::Ping(PingData { nonce: 1 }), + ); + + let mut msg2 = StacksMessage::new( + 2, + 2, + 2, + &BurnchainHeaderHash([0x22; 32]), + 2, + &BurnchainHeaderHash([0x22; 32]), + 
StacksMessageType::Ping(PingData { nonce: 2 }), + ); + msg2.sign(2, &StacksPrivateKey::new()).unwrap(); + + let pkey_1 = StacksPrivateKey::new(); + let pkey_2 = StacksPrivateKey::new(); + + let pushed_pkey_1 = StacksPrivateKey::new(); + let pushed_pkey_2 = StacksPrivateKey::new(); + + let uploaded_pkey_1 = StacksPrivateKey::new(); + let uploaded_pkey_2 = StacksPrivateKey::new(); + + let blk1 = make_empty_coinbase_block(&pkey_1); + let blk2 = make_empty_coinbase_block(&pkey_2); + + let pushed_blk1 = make_empty_coinbase_block(&pushed_pkey_1); + let pushed_blk2 = make_empty_coinbase_block(&pushed_pkey_2); + + let uploaded_blk1 = make_empty_coinbase_block(&uploaded_pkey_1); + let uploaded_blk2 = make_empty_coinbase_block(&uploaded_pkey_2); + + let mblk1 = make_codec_test_microblock(1); + let mblk2 = make_codec_test_microblock(2); + + let pushed_mblk1 = make_codec_test_microblock(3); + let pushed_mblk2 = make_codec_test_microblock(4); + + let uploaded_mblk1 = make_codec_test_microblock(5); + let uploaded_mblk2 = make_codec_test_microblock(6); + + let pushed_tx1 = make_codec_test_microblock(3).txs[2].clone(); + let pushed_tx2 = make_codec_test_microblock(4).txs[3].clone(); + + let uploaded_tx1 = make_codec_test_microblock(5).txs[4].clone(); + let uploaded_tx2 = make_codec_test_microblock(6).txs[5].clone(); + + let synced_tx1 = make_codec_test_microblock(7).txs[6].clone(); + let synced_tx2 = make_codec_test_microblock(8).txs[7].clone(); + + let naka_header_1 = NakamotoBlockHeader { + version: 1, + chain_length: 1, + burn_spent: 1, + consensus_hash: ConsensusHash([0x01; 20]), + parent_block_id: StacksBlockId([0x01; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x01; 32]), + state_index_root: TrieHash([0x01; 32]), + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_header_2 = NakamotoBlockHeader { + version: 2, + chain_length: 2, + burn_spent: 2, + consensus_hash: 
ConsensusHash([0x02; 20]), + parent_block_id: StacksBlockId([0x02; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x02; 32]), + state_index_root: TrieHash([0x02; 32]), + timestamp: 2, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_1 = NakamotoBlockHeader { + version: 3, + chain_length: 3, + burn_spent: 3, + consensus_hash: ConsensusHash([0x03; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x03; 32]), + state_index_root: TrieHash([0x03; 32]), + timestamp: 3, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_2 = NakamotoBlockHeader { + version: 4, + chain_length: 4, + burn_spent: 4, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x04; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x04; 32]), + state_index_root: TrieHash([0x04; 32]), + timestamp: 4, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_uploaded_header_1 = NakamotoBlockHeader { + version: 5, + chain_length: 5, + burn_spent: 5, + consensus_hash: ConsensusHash([0x05; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x05; 32]), + timestamp: 5, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_uploaded_header_2 = NakamotoBlockHeader { + version: 6, + chain_length: 6, + burn_spent: 6, + consensus_hash: ConsensusHash([0x06; 20]), + parent_block_id: StacksBlockId([0x06; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x06; 32]), + timestamp: 6, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + 
}; + + let nblk1 = NakamotoBlock { + header: naka_header_1.clone(), + txs: vec![], + }; + let nblk2 = NakamotoBlock { + header: naka_header_2.clone(), + txs: vec![], + }; + + let pushed_nblk1 = NakamotoBlock { + header: naka_pushed_header_1.clone(), + txs: vec![], + }; + let pushed_nblk2 = NakamotoBlock { + header: naka_pushed_header_2.clone(), + txs: vec![], + }; + + let uploaded_nblk1 = NakamotoBlock { + header: naka_uploaded_header_1.clone(), + txs: vec![], + }; + let uploaded_nblk2 = NakamotoBlock { + header: naka_uploaded_header_2.clone(), + txs: vec![], + }; + + let pushed_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x11; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![1], + }, + }; + + let pushed_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x22; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![2], + }, + }; + + let uploaded_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + chunk_data: StackerDBChunkData { + slot_id: 3, + slot_version: 3, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let uploaded_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x44; 20]), + chunk_data: StackerDBChunkData { + slot_id: 4, + slot_version: 4, + sig: MessageSignature::empty(), + data: vec![4], + }, + }; + + network_result_1 + .unhandled_messages + .insert(nk1.clone(), vec![msg1.clone()]); + network_result_1 + .blocks + .push((ConsensusHash([0x11; 20]), blk1.clone(), 1)); + network_result_1.confirmed_microblocks.push(( + ConsensusHash([0x11; 20]), 
+ vec![mblk1.clone()], + 1, + )); + network_result_1 + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + network_result_1 + .pushed_transactions + .insert(nk1.clone(), vec![(vec![], pushed_tx1.clone())]); + network_result_1.pushed_blocks.insert( + nk1.clone(), + vec![BlocksData { + blocks: vec![BlocksDatum(ConsensusHash([0x11; 20]), pushed_blk1.clone())], + }], + ); + network_result_1.pushed_microblocks.insert( + nk1.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![pushed_mblk1.clone()], + }, + )], + ); + network_result_1.pushed_nakamoto_blocks.insert( + nk1.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk1], + }, + )], + ); + network_result_1 + .uploaded_transactions + .push(uploaded_tx1.clone()); + network_result_1.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + uploaded_blk1.clone(), + )], + }); + network_result_1.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![uploaded_mblk1.clone()], + }); + network_result_1 + .uploaded_nakamoto_blocks + .push(uploaded_nblk1.clone()); + network_result_1 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_1.clone()); + network_result_1 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_1.clone()); + network_result_1.synced_transactions.push(synced_tx1); + + network_result_2 + .unhandled_messages + .insert(nk2.clone(), vec![msg2.clone()]); + network_result_2 + .blocks + .push((ConsensusHash([0x22; 20]), blk2.clone(), 2)); + network_result_2.confirmed_microblocks.push(( + ConsensusHash([0x22; 20]), + vec![mblk2.clone()], + 2, + )); + network_result_2 + .nakamoto_blocks + .insert(nblk2.block_id(), nblk2.clone()); + network_result_2 + .pushed_transactions + .insert(nk2.clone(), vec![(vec![], pushed_tx2.clone())]); + network_result_2.pushed_blocks.insert( + nk2.clone(), + vec![BlocksData { + blocks: 
vec![BlocksDatum(ConsensusHash([0x22; 20]), pushed_blk2.clone())], + }], + ); + network_result_2.pushed_microblocks.insert( + nk2.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![pushed_mblk2.clone()], + }, + )], + ); + network_result_2.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk2], + }, + )], + ); + network_result_2 + .uploaded_transactions + .push(uploaded_tx2.clone()); + network_result_2.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x22; 20]), + uploaded_blk2.clone(), + )], + }); + network_result_2.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![uploaded_mblk2.clone()], + }); + network_result_2 + .uploaded_nakamoto_blocks + .push(uploaded_nblk2.clone()); + network_result_2 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_2.clone()); + network_result_2 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_2.clone()); + network_result_2.synced_transactions.push(synced_tx2); + + let mut network_result_union = network_result_2.clone(); + let mut n1 = network_result_1.clone(); + network_result_union + .unhandled_messages + .extend(n1.unhandled_messages.into_iter()); + network_result_union.blocks.append(&mut n1.blocks); + network_result_union + .confirmed_microblocks + .append(&mut n1.confirmed_microblocks); + network_result_union + .nakamoto_blocks + .extend(n1.nakamoto_blocks.into_iter()); + network_result_union + .pushed_transactions + .extend(n1.pushed_transactions.into_iter()); + network_result_union + .pushed_blocks + .extend(n1.pushed_blocks.into_iter()); + network_result_union + .pushed_microblocks + .extend(n1.pushed_microblocks.into_iter()); + network_result_union + .pushed_nakamoto_blocks + .extend(n1.pushed_nakamoto_blocks.into_iter()); + network_result_union + .uploaded_transactions + .append(&mut 
n1.uploaded_transactions); + network_result_union + .uploaded_blocks + .append(&mut n1.uploaded_blocks); + network_result_union + .uploaded_microblocks + .append(&mut n1.uploaded_microblocks); + network_result_union + .uploaded_nakamoto_blocks + .append(&mut n1.uploaded_nakamoto_blocks); + // stackerdb chunks from n1 get dropped since their rc_consensus_hash no longer matches + network_result_union + .synced_transactions + .append(&mut n1.synced_transactions); + + // update is idempotent + let old = network_result_1.clone(); + let new = network_result_1.clone(); + assert_eq!(old.update(new), network_result_1); + + // disjoint results get unioned, except for stackerdb chunks + let old = network_result_1.clone(); + let new = network_result_2.clone(); + assert_eq!(old.update(new), network_result_union); + + // merging a subset is idempotent + assert_eq!( + network_result_1 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + assert_eq!( + network_result_2 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + + // stackerdb uploaded chunks get consolidated correctly + let mut old = NetworkResult::new( + StacksBlockId([0xaa; 32]), + 10, + 10, + 10, + 10, + 10, + ConsensusHash([0xaa; 20]), + HashMap::new(), + ); + let mut new = old.clone(); + + let old_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: 
ConsensusHash([0xaa; 20]),
+        chunk_data: StackerDBChunkData {
+            slot_id: 2,
+            slot_version: 2,
+            sig: MessageSignature::empty(),
+            data: vec![3],
+        },
+    };
+
+    old.uploaded_stackerdb_chunks.push(old_chunk_1.clone());
+    // replaced
+    new.uploaded_stackerdb_chunks.push(new_chunk_1.clone());
+    // included
+    new.uploaded_stackerdb_chunks.push(new_chunk_2.clone());
+
+    assert_eq!(
+        old.update(new).uploaded_stackerdb_chunks,
+        vec![new_chunk_1.clone(), new_chunk_2.clone()]
+    );
+
+    // stackerdb pushed chunks get consolidated correctly
+    let mut old = NetworkResult::new(
+        StacksBlockId([0xaa; 32]),
+        10,
+        10,
+        10,
+        10,
+        10,
+        ConsensusHash([0xaa; 20]),
+        HashMap::new(),
+    );
+    let mut new = old.clone();
+
+    let old_chunk_1 = StackerDBPushChunkData {
+        contract_id: QualifiedContractIdentifier::transient(),
+        rc_consensus_hash: ConsensusHash([0xaa; 20]),
+        chunk_data: StackerDBChunkData {
+            slot_id: 1,
+            slot_version: 1,
+            sig: MessageSignature::empty(),
+            data: vec![3],
+        },
+    };
+
+    let new_chunk_1 = StackerDBPushChunkData {
+        contract_id: QualifiedContractIdentifier::transient(),
+        rc_consensus_hash: ConsensusHash([0xaa; 20]),
+        chunk_data: StackerDBChunkData {
+            slot_id: 1,
+            slot_version: 2,
+            sig: MessageSignature::empty(),
+            data: vec![3],
+        },
+    };
+
+    let new_chunk_2 = StackerDBPushChunkData {
+        contract_id: QualifiedContractIdentifier::transient(),
+        rc_consensus_hash: ConsensusHash([0xaa; 20]),
+        chunk_data: StackerDBChunkData {
+            slot_id: 2,
+            slot_version: 2,
+            sig: MessageSignature::empty(),
+            data: vec![3],
+        },
+    };
+
+    old.pushed_stackerdb_chunks.push(old_chunk_1.clone());
+    // replaced
+    new.pushed_stackerdb_chunks.push(new_chunk_1.clone());
+    // included
+    new.pushed_stackerdb_chunks.push(new_chunk_2.clone());
+
+    assert_eq!(
+        old.update(new).pushed_stackerdb_chunks,
+        vec![new_chunk_1.clone(), new_chunk_2.clone()]
+    );
+
+    // nakamoto blocks obtained via download, upload, or pushed get consolidated
+    let mut old = NetworkResult::new(
+        
StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + old.nakamoto_blocks.insert(nblk1.block_id(), nblk1.clone()); + old.pushed_nakamoto_blocks.insert( + nk1.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + old.uploaded_nakamoto_blocks.push(nblk1.clone()); + + let new = NetworkResult::new( + StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + + let mut new_pushed = new.clone(); + let mut new_uploaded = new.clone(); + let mut new_downloaded = new.clone(); + + new_downloaded + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + new_pushed.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + new_uploaded.uploaded_nakamoto_blocks.push(nblk1.clone()); + + debug!("===="); + let updated_downloaded = old.clone().update(new_downloaded); + assert_eq!(updated_downloaded.nakamoto_blocks.len(), 1); + assert_eq!( + updated_downloaded + .nakamoto_blocks + .get(&nblk1.block_id()) + .unwrap(), + &nblk1 + ); + assert_eq!(updated_downloaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_downloaded.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_pushed = old.clone().update(new_pushed); + assert_eq!(updated_pushed.nakamoto_blocks.len(), 0); + assert_eq!(updated_pushed.pushed_nakamoto_blocks.len(), 1); + assert_eq!( + updated_pushed + .pushed_nakamoto_blocks + .get(&nk2) + .unwrap() + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks[0], + nblk1 + ); + assert_eq!(updated_pushed.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_uploaded = old.clone().update(new_uploaded); + 
assert_eq!(updated_uploaded.nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks.len(), 1); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks[0], nblk1); +} From 225ada1e96c81046b8e3ae9291510284f9a00878 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 10:51:34 -0400 Subject: [PATCH 031/109] chore: remove logic to drain the network result channel (it's not needed), and merge un-sent NetworkResult's in order to keep the queue length bound to at most one outstanding NetworkResult --- testnet/stacks-node/src/nakamoto_node/peer.rs | 80 +++++++---------- .../stacks-node/src/nakamoto_node/relayer.rs | 87 ++++++------------- 2 files changed, 57 insertions(+), 110 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 4e208a88cc..9dc6ff639b 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; use std::thread; @@ -54,11 +53,9 @@ pub struct PeerThread { chainstate: StacksChainState, /// handle to the mempool DB mempool: MemPoolDB, - /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet - /// (i.e. due to backpressure). We track this separately, instead of just using a bigger - /// channel, because we need to know when backpressure occurs in order to throttle the p2p - /// thread's downloader. - results_with_data: VecDeque, + /// Buffered network result relayer command. + /// P2P network results are consolidated into a single directive. + results_with_data: Option, /// total number of p2p state-machine passes so far. 
Used to signal when to download the next /// reward cycle of blocks num_p2p_state_machine_passes: u64, @@ -199,7 +196,7 @@ impl PeerThread { sortdb, chainstate, mempool, - results_with_data: VecDeque::new(), + results_with_data: None, num_p2p_state_machine_passes: 0, num_inv_sync_passes: 0, num_download_passes: 0, @@ -238,7 +235,18 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = self + .results_with_data + .as_ref() + .map(|res| { + if let RelayerDirective::HandleNetResult(netres) = &res { + netres.has_block_data_to_store() + } else { + false + } + }) + .unwrap_or(false); + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( @@ -282,7 +290,6 @@ impl PeerThread { }; match p2p_res { Ok(network_result) => { - let mut have_update = false; if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { // p2p state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_p2p_state_pass(); @@ -293,46 +300,28 @@ impl PeerThread { // inv-sync state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_inv_sync_pass(); self.num_inv_sync_passes = network_result.num_inv_sync_passes; - - // the relayer cares about the number of inventory passes, so pass this along - have_update = true; } if self.num_download_passes < network_result.num_download_passes { // download state-machine did a full pass. Notify anyone listening. 
self.globals.sync_comms.notify_download_pass(); self.num_download_passes = network_result.num_download_passes; - - // the relayer cares about the number of download passes, so pass this along - have_update = true; } - if ((ibd || download_backpressure) && network_result.has_block_data_to_store()) - || (!ibd && network_result.has_data_to_store()) - || self.last_burn_block_height != network_result.burn_height - || have_update - { - // pass along if we have blocks, microblocks, or transactions, or a status - // update on the network's view of the burnchain - self.last_burn_block_height = network_result.burn_height; - self.results_with_data - .push_back(RelayerDirective::HandleNetResult(network_result)); - - self.globals.raise_initiative( - "PeerThread::run_one_pass() with data-bearing network result".to_string(), - ); + self.last_burn_block_height = network_result.burn_height; + if let Some(res) = self.results_with_data.take() { + if let RelayerDirective::HandleNetResult(netres) = res { + let new_res = netres.update(network_result); + self.results_with_data = Some(RelayerDirective::HandleNetResult(new_res)); + } + } else { + self.results_with_data = + Some(RelayerDirective::HandleNetResult(network_result)); } - if ibd || download_backpressure { - // if we have backpressure or we're in ibd, then only keep network results that tell us - // block data or information about download and inv passes - self.results_with_data.retain(|netres| match netres { - RelayerDirective::HandleNetResult(netres) => { - netres.has_block_data_to_store() - } - _ => true, - }) - } + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } Err(e) => { // this is only reachable if the network is not instantiated correctly -- @@ -341,7 +330,7 @@ impl PeerThread { } }; - while let Some(next_result) = self.results_with_data.pop_front() { + if let Some(next_result) = self.results_with_data.take() { // have blocks, microblocks, and/or 
transactions (don't care about anything else), // or a directive to mine microblocks self.globals.raise_initiative( @@ -349,15 +338,13 @@ impl PeerThread { ); if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( - "P2P: {:?}: download backpressure detected (bufferred {})", + "P2P: {:?}: download backpressure detected", &self.net.local_peer, - self.results_with_data.len() ); match e { TrySendError::Full(directive) => { // don't lose this data -- just try it again - self.results_with_data.push_front(directive); - break; + self.results_with_data = Some(directive); } TrySendError::Disconnected(_) => { info!("P2P: Relayer hang up with p2p channel"); @@ -366,10 +353,7 @@ impl PeerThread { } } } else { - debug!( - "P2P: Dispatched result to Relayer! {} results remaining", - self.results_with_data.len() - ); + debug!("P2P: Dispatched result to Relayer!",); } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index eb7d7db079..407f1bde77 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -17,7 +17,7 @@ use core::fmt; use std::collections::HashSet; use std::fs; use std::io::Read; -use std::sync::mpsc::{Receiver, RecvTimeoutError, TryRecvError}; +use std::sync::mpsc::{Receiver, RecvTimeoutError}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -1206,71 +1206,34 @@ impl RelayerThread { None }; - let mut handled = true; - let mut disconnect = false; - let mut try_recv = true; - let mut drained = false; - let raised_initiative_fmt = - format!("{}", raised_initiative.unwrap_or("relay_rcv".to_string())); - - debug!("Relayer: drain channel"); - // drain the channel - while !disconnect && handled && !drained { - let directive = if let Some(directive) = initiative_directive.take() { - debug!("Relayer: initiative from directive"); - directive - } else if try_recv { - // drain the channel - match relay_rcv.try_recv() 
{ - Ok(directive) => { - debug!("Relayer: initiative from try_recv"); - directive - } - Err(TryRecvError::Empty) => { - try_recv = false; - continue; - } - Err(TryRecvError::Disconnected) => { - disconnect = true; - break; - } + let directive = if let Some(directive) = initiative_directive.take() { + debug!("Relayer: initiative from directive"); + directive + } else { + // channel was drained, so do a time-bound recv + match relay_rcv.recv_timeout(Duration::from_millis( + self.config.node.next_initiative_delay, + )) { + Ok(directive) => { + // only do this once, so we can call .initiative() again + debug!("Relayer: initiative from recv_timeout"); + directive } - } else { - // channel was drained, so do a time-bound recv - match relay_rcv.recv_timeout(Duration::from_millis( - self.config.node.next_initiative_delay, - )) { - Ok(directive) => { - // only do this once, so we can call .initiative() again - debug!("Relayer: initiative from recv_timeout"); - drained = true; - directive - } - Err(RecvTimeoutError::Timeout) => { - break; - } - Err(RecvTimeoutError::Disconnected) => { - disconnect = true; - break; - } + Err(RecvTimeoutError::Timeout) => { + continue; } - }; + Err(RecvTimeoutError::Disconnected) => { + break; + } + } + }; - debug!("Relayer: main loop directive"; - "try_recv" => %try_recv, - "drained" => %drained, - "directive" => %directive, - "raised_initiative" => %raised_initiative_fmt, - "timed_out" => %timed_out); + debug!("Relayer: main loop directive"; + "directive" => %directive, + "raised_initiative" => ?raised_initiative, + "timed_out" => %timed_out); - if !self.handle_directive(directive) { - handled = false; - break; - } - } - debug!("Relayer: drained channel"); - if disconnect || !handled { - info!("Exiting relayer main loop"); + if !self.handle_directive(directive) { break; } } From 39279989fd3e3cfedd295c53ea85eda3019e9629 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 09:37:31 -0700 Subject: [PATCH 032/109] Fix block 
validation test and cleanup comment Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 2 +- stacks-signer/src/v0/signer.rs | 5 +-- testnet/stacks-node/src/tests/signer/v0.rs | 46 ++++++++++++---------- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index ec966539ca..30a44d2068 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -129,7 +129,7 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, - /// How much time ot wait for a block proposal validation response before marking the block invalid + /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 95769eed57..32c4a41b7a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -691,8 +691,7 @@ impl Signer { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted { - // The block has already reached consensus. This should never really hit, but check just to be safe. - self.submitted_block_proposal = None; + // The block has already reached consensus. 
return; } block_info @@ -704,12 +703,10 @@ impl Signer { "signer_sighash" => %signature_sighash, "block_id" => %block_proposal.block.block_id(), ); - self.submitted_block_proposal = None; return; } Err(e) => { error!("{self}: Failed to lookup block in signer db: {e:?}",); - self.submitted_block_proposal = None; return; } }; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 87317a78f6..612e3a1dd4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5623,7 +5623,7 @@ fn block_validation_response_timeout() { info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let timeout = Duration::from_secs(60); + let timeout = Duration::from_secs(30); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; @@ -5676,7 +5676,12 @@ fn block_validation_response_timeout() { }) .expect("Timed out waiting for block proposal"); - info!("------------------------- Propose Another Block -------------------------"); + assert!( + validation_stall_start.elapsed() < timeout, + "Test was too slow to propose another block before the timeout" + ); + + info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), @@ -5686,26 +5691,27 @@ fn block_validation_response_timeout() { txs: vec![], }; + let info_before = get_chain_info(&signer_test.running_nodes.conf); // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); block.header.pox_treatment = BitVec::ones(1).unwrap(); block.header.consensus_hash = view.cur_sortition.consensus_hash; 
- block.header.chain_length = 1; // We have mined 1 block so far + block.header.chain_length = info_before.stacks_tip_height + 1; let block_signer_signature_hash_1 = block.header.signer_signature_hash(); - let info_before = get_chain_info(&signer_test.running_nodes.conf); - signer_test.propose_block(block, Duration::from_secs(30)); + signer_test.propose_block(block, timeout); info!("------------------------- Waiting for Timeout -------------------------"); // Sleep the necessary timeout to make sure the validation times out. let elapsed = validation_stall_start.elapsed(); + let wait = timeout.saturating_sub(elapsed); + info!("Sleeping for {} ms", wait.as_millis()); std::thread::sleep(timeout.saturating_sub(elapsed)); info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); // Verify the signers rejected the first block due to timeout - let start = Instant::now(); let mut rejected_signers = vec![]; - let mut saw_connectivity_complaint = false; + let start = Instant::now(); while rejected_signers.len() < num_signers { std::thread::sleep(Duration::from_secs(1)); let chunks = test_observer::get_stackerdb_chunks(); @@ -5714,32 +5720,30 @@ fn block_validation_response_timeout() { else { continue; }; - if let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { reason: _reason, reason_code, signer_signature_hash, signature, .. })) = message - { - if signer_signature_hash == block_signer_signature_hash_1 { - rejected_signers.push(signature); - if matches!(reason_code, RejectCode::ConnectivityIssues) { - saw_connectivity_complaint = true; - } + else { + continue; + }; + // We are waiting for the original block proposal which will have a diff signature to our + // second proposed block. 
+ if signer_signature_hash != block_signer_signature_hash_1 { + rejected_signers.push(signature); + if matches!(reason_code, RejectCode::ConnectivityIssues) { + break; } } } assert!( - start.elapsed() <= Duration::from_secs(10), - "Timed out after waiting for response from signer" + start.elapsed() <= timeout, + "Timed out after waiting for ConenctivityIssues block rejection" ); } - - assert!( - saw_connectivity_complaint, - "We did not see the expected connectity rejection reason" - ); // Make sure our chain has still not advanced let info_after = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(info_before, info_after); From 559b1d676a8204952c7244e030b76b23b0e11cc6 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 31 Oct 2024 09:43:29 -0700 Subject: [PATCH 033/109] dx: add set/get helper to TestFlag --- .../stacks-node/src/nakamoto_node/relayer.rs | 8 +----- testnet/stacks-node/src/run_loop/neon.rs | 15 ++++++++++- .../src/tests/nakamoto_integrations.rs | 8 +++--- testnet/stacks-node/src/tests/signer/v0.rs | 26 +++++-------------- 4 files changed, 26 insertions(+), 31 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..bb03222502 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1003,13 +1003,7 @@ impl RelayerThread { #[cfg(test)] fn fault_injection_skip_block_commit(&self) -> bool { - self.globals - .counters - .naka_skip_commit_op - .0 - .lock() - .unwrap() - .unwrap_or(false) + self.globals.counters.naka_skip_commit_op.get() } #[cfg(not(test))] diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a18a61988b..887c79b9ba 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -93,6 +93,19 @@ impl Default for TestFlag { } } +#[cfg(test)] +impl TestFlag { + /// Set the test flag to the given 
value + pub fn set(&self, value: bool) { + *self.0.lock().unwrap() = Some(value); + } + + /// Get the test flag value. Defaults to false if the flag is not set. + pub fn get(&self) -> bool { + self.0.lock().unwrap().unwrap_or(false) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -1034,7 +1047,7 @@ impl RunLoop { /// This function will block by looping infinitely. /// It will start the burnchain (separate thread), set-up a channel in /// charge of coordinating the new blocks coming from the burnchain and - /// the nodes, taking turns on tenures. + /// the nodes, taking turns on tenures. /// /// Returns `Option` so that data can be passed to `NakamotoNode` pub fn start( diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b5140a06ee..adca6411de 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -5051,7 +5051,7 @@ fn forked_tenure_is_ignored() { // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to // be processed - test_skip_commit_op.0.lock().unwrap().replace(true); + test_skip_commit_op.set(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted. 
@@ -5104,7 +5104,7 @@ fn forked_tenure_is_ignored() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { - test_skip_commit_op.0.lock().unwrap().replace(false); + test_skip_commit_op.set(false); TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); @@ -7054,7 +7054,7 @@ fn continue_tenure_extend() { .get_stacks_blocks_processed(); info!("Pausing commit ops to trigger a tenure extend."); - test_skip_commit_op.0.lock().unwrap().replace(true); + test_skip_commit_op.set(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -7153,7 +7153,7 @@ fn continue_tenure_extend() { } info!("Resuming commit ops to mine regular tenures."); - test_skip_commit_op.0.lock().unwrap().replace(false); + test_skip_commit_op.set(false); // Mine 15 more regular nakamoto tenures for _i in 0..15 { diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684a..e4a4deea37 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1003,10 +1003,7 @@ fn forked_tenure_testing( signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -1083,10 +1080,7 @@ fn forked_tenure_testing( signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(false); + .set(false); let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { @@ -1850,7 +1844,7 @@ fn miner_forking() { let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - naka_skip_commit_op.0.lock().unwrap().replace(false); + naka_skip_commit_op.set(true); info!("------------------------- Reached 
Epoch 3.0 -------------------------"); let mut sortitions_seen = Vec::new(); @@ -1868,7 +1862,7 @@ fn miner_forking() { .running_nodes .btc_regtest_controller .build_next_block(1); - naka_skip_commit_op.0.lock().unwrap().replace(false); + naka_skip_commit_op.set(false); // wait until a commit is submitted by run_loop_2 wait_for(60, || { @@ -1892,7 +1886,7 @@ fn miner_forking() { // block commits from RL2 -- this will block until the start of the next iteration // in this loop. - naka_skip_commit_op.0.lock().unwrap().replace(true); + naka_skip_commit_op.set(true); // ensure RL1 performs an RBF after unblock block broadcast let rl1_commits_before = signer_test .running_nodes @@ -2499,10 +2493,7 @@ fn empty_sortition() { signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); let blocks_after = signer_test .running_nodes @@ -5120,10 +5111,7 @@ fn continue_after_tenure_extend() { signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); // It's possible that we have a pending block commit already. // Mine two BTC blocks to "flush" this commit. 
From b32df684cbf53c17250ff27a0fb302d05f2d30ef Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 09:52:57 -0700 Subject: [PATCH 034/109] Cleanup comments and remove unnecessary clone Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 6 +++--- stacks-signer/src/v0/signer.rs | 13 +++++++------ testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++++++----- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 30a44d2068..b11dcc4a41 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -161,7 +161,7 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option, - /// How long to wait for a response from a block proposal validation response from the node + /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, } @@ -193,8 +193,8 @@ struct RawConfigFile { pub block_proposal_timeout_ms: Option, /// An optional custom Chain ID pub chain_id: Option, - /// How long to wait for a response from a block proposal validation response from the node - /// before marking that block as invalid and rejecting it in milliseconds. + /// How long to wait in milliseconds for a response from a block proposal validation response from the node + /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, } diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 32c4a41b7a..6e965cf634 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -670,17 +670,15 @@ impl Signer { /// Check the current tracked submitted block proposal to see if it has timed out. /// Broadcasts a rejection and marks the block locally rejected if it has. 
fn check_submitted_block_proposal(&mut self) { - let Some((block_proposal, block_submission)) = self.submitted_block_proposal.clone() else { + let Some((block_proposal, block_submission)) = self.submitted_block_proposal.take() else { // Nothing to check. return; }; if block_submission.elapsed() < self.block_proposal_validation_timeout { - // Not expired yet. + // Not expired yet. Put it back! + self.submitted_block_proposal = Some((block_proposal, block_submission)); return; } - // Let us immediately flush, even if we later encounter an error broadcasting our responses. - // We should still attempt to handle a new proposal at this point. - self.submitted_block_proposal = None; let signature_sighash = block_proposal.block.header.signer_signature_hash(); // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self @@ -710,6 +708,8 @@ impl Signer { return; } }; + // We cannot determine the validity of the block, but we have not reached consensus on it yet. + // Reject it so we aren't holding up the network because of our inaction. warn!( "{self}: Failed to receive block validation response within {} ms. Rejecting block.", self.block_proposal_validation_timeout.as_millis(); "signer_sighash" => %signature_sighash, @@ -721,7 +721,6 @@ impl Signer { &self.private_key, self.mainnet, ); - // We know proposal is invalid. Send rejection message, do not do further validation if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); }; @@ -855,6 +854,7 @@ impl Signer { .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) .unwrap_or(false) { + // Consensus reached! No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. 
self.submitted_block_proposal = None; } } @@ -1005,6 +1005,7 @@ impl Signer { .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) .unwrap_or(false) { + // Consensus reached! No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. self.submitted_block_proposal = None; } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 612e3a1dd4..3f0140c4a7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5608,7 +5608,9 @@ fn multiple_miners_with_custom_chain_id() { signer_test.shutdown(); } -// Teimout a block validation response +// Ensures that a signer will issue ConnectivityIssues rejections if a block submission +// times out. Also ensures that no other proposal gets submitted for validation if we +// are already waiting for a block submission response. #[test] #[ignore] fn block_validation_response_timeout() { @@ -5732,11 +5734,12 @@ fn block_validation_response_timeout() { }; // We are waiting for the original block proposal which will have a diff signature to our // second proposed block. 
- if signer_signature_hash != block_signer_signature_hash_1 { + assert_ne!( + signer_signature_hash, block_signer_signature_hash_1, + "Received a rejection for the wrong block" + ); + if matches!(reason_code, RejectCode::ConnectivityIssues) { rejected_signers.push(signature); - if matches!(reason_code, RejectCode::ConnectivityIssues) { - break; - } } } assert!( From 18662ccd70897660b5070f234eb56d1deeb0132b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 10:00:04 -0700 Subject: [PATCH 035/109] Clippy fix signer and stackslib cli.rs Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 2 +- stacks-signer/src/main.rs | 6 +----- stacks-signer/src/monitoring/mod.rs | 3 +-- stacks-signer/src/v0/signer.rs | 2 +- stackslib/src/cli.rs | 4 +--- 5 files changed, 5 insertions(+), 12 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 6fc7c7b2dd..7dd9cc4fdf 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -335,7 +335,7 @@ Metrics endpoint: {metrics_endpoint} /// Get the chain ID for the network pub fn to_chain_id(&self) -> u32 { - self.chain_id.unwrap_or_else(|| match self.network { + self.chain_id.unwrap_or(match self.network { Network::Mainnet => CHAIN_ID_MAINNET, Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, }) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a23918f6f8..eac60cc53f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -157,11 +157,7 @@ fn handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!( - "Signer version: {}\nConfig: \n{}", - VERSION_STRING.to_string(), - config - ); + println!("Signer version: {}\nConfig: \n{}", *VERSION_STRING, config); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/mod.rs 
b/stacks-signer/src/monitoring/mod.rs index 621886b9c0..4f6956051c 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -97,8 +97,7 @@ pub fn update_signer_nonce(nonce: u64) { #[allow(dead_code)] /// Remove the origin from the full path to avoid duplicate metrics for different origins fn remove_origin_from_path(full_path: &str, origin: &str) -> String { - let path = full_path.replace(origin, ""); - path + full_path.replace(origin, "") } /// Start a new RPC call timer. diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fcda30a270..bcdcd2f7a0 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -191,7 +191,7 @@ impl SignerTrait for Signer { "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); - stacks_client.post_block_until_ok(self, &b); + stacks_client.post_block_until_ok(self, b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 9ff6e55644..587daee787 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -528,8 +528,6 @@ fn replay_block( fn replay_block_nakamoto( sort_db: &mut SortitionDB, stacks_chain_state: &mut StacksChainState, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, block: &NakamotoBlock, block_size: u64, ) -> Result<(), ChainstateError> { @@ -785,7 +783,7 @@ fn replay_block_nakamoto( return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (receipt, _clarity_commit, _reward_set_data) = ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), From 07b65cb2a9245fe7df18e772e69622b0a91ed3a1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 13:02:06 -0400 Subject: [PATCH 036/109] chore: remove dead code --- .../nakamoto/download_state_machine.rs | 139 
+----------------- 1 file changed, 1 insertion(+), 138 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 02ed8b9419..7655a56ab5 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -465,142 +465,6 @@ impl NakamotoDownloadStateMachine { Ok(()) } - /// Determine if the set of `TenureStartEnd`s represents available but unfetched data. Used to - /// determine whether or not to update the set of wanted tenures -- we don't want to skip - /// fetching wanted tenures if they're still available! - pub(crate) fn have_unprocessed_tenures<'a>( - first_nakamoto_rc: u64, - completed_tenures: &HashSet, - prev_wanted_tenures: &[WantedTenure], - tenure_block_ids: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator, - ) -> bool { - if prev_wanted_tenures.is_empty() { - debug!("prev_wanted_tenures is empty, so we have unprocessed tenures"); - return true; - } - - // the anchor block for prev_wanted_tenures must not only be processed, but also we have to - // have seen an inventory message from the subsequent reward cycle. 
If we can see - // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be - // true - let prev_wanted_rc = prev_wanted_tenures - .last() - .map(|wt| { - downloader_block_height_to_reward_cycle( - pox_constants, - first_burn_height, - wt.burn_height, - ) - .expect("FATAL: wanted tenure before system start") - }) - .unwrap_or(u64::MAX); - - let cur_wanted_rc = prev_wanted_rc.saturating_add(1); - - debug!( - "have_unprocessed_tenures: prev_wanted_rc = {}, cur_wanted_rc = {}", - prev_wanted_rc, cur_wanted_rc - ); - - let mut has_prev_inv = false; - let mut has_cur_inv = false; - let mut num_invs = 0; - for inv in inventory_iter { - num_invs += 1; - if prev_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_prev_inv = true; - } else if inv.tenures_inv.get(&prev_wanted_rc).is_some() { - has_prev_inv = true; - } - - if cur_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_cur_inv = true; - } else if inv.tenures_inv.get(&cur_wanted_rc).is_some() { - has_cur_inv = true; - } - } - - if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures. 
Total inventories: {}", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv, num_invs); - return true; - } - - // the state machine updates `tenure_block_ids` _after_ `wanted_tenures`, so verify that - // this isn't a stale `tenure_block_ids` by checking that it contains at least one block in - // the prev_wanted_rc and at least one in the cur_wanted_rc - let mut has_prev_rc_block = false; - let mut has_cur_rc_block = false; - let mut available_considered = 0; - for (_naddr, available) in tenure_block_ids.iter() { - available_considered += available.len(); - debug!("Consider available tenures from {}", _naddr); - for (_ch, tenure_info) in available.iter() { - debug!("Consider tenure info for {}: {:?}", _ch, tenure_info); - if tenure_info.start_reward_cycle == prev_wanted_rc - || tenure_info.end_reward_cycle == prev_wanted_rc - { - has_prev_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in prev reward cycle {}", - _ch, prev_wanted_rc - ); - } - if tenure_info.start_reward_cycle == cur_wanted_rc - || tenure_info.end_reward_cycle == cur_wanted_rc - { - has_cur_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in cur reward cycle {}", - _ch, cur_wanted_rc - ); - } - } - } - - if available_considered > 0 - && ((prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block)) - { - debug!( - "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})", - prev_wanted_rc, - has_prev_rc_block, - cur_wanted_rc, - has_cur_rc_block, - ); - return true; - } - - let mut ret = false; - for (_naddr, available) in tenure_block_ids.iter() { - for wt in prev_wanted_tenures.iter() { - let Some(tenure_info) = available.get(&wt.tenure_id_consensus_hash) else { - continue; - }; - if completed_tenures.contains(&tenure_info.tenure_id_consensus_hash) { - // this check is necessary because the check for .processed requires that a - // child tenure block has 
been processed, which isn't guaranteed at a reward - // cycle boundary - debug!("Tenure {:?} has been fully downloaded", &tenure_info); - continue; - } - if !tenure_info.processed { - debug!( - "Tenure {:?} is available from {} but not processed", - &tenure_info, &_naddr - ); - ret = true; - } - } - } - ret - } - /// Update the state machine's wanted tenures and processed tenures, if it's time to do so. /// This will only happen when the sortition DB has finished processing a reward cycle of /// tenures when in IBD mode, _OR_ when the sortition tip advances when in steady-state mode. @@ -612,8 +476,7 @@ impl NakamotoDownloadStateMachine { /// cycle boundaries, where the sortition DB is about to begin processing a new reward cycle. /// The list of wanted tenures for the current reward cycle will be saved as /// `self.prev_wanted_tenures`, and the set of wanted tenures for the next reward cycle - /// will be stored to `self.wanted_tenures`. It will only update these two lists if it is safe - /// to do so, as determined by `have_unprocessed_tenures()`. + /// will be stored to `self.wanted_tenures`. /// /// In the second case (i.e. 
not a reward cycle boundary), this function will load up _new_ /// wanted tenure data and append it to `self.wanted_tenures` via From 27e7301844b7b2a90407f712315a04c05fc4f406 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 13:02:25 -0400 Subject: [PATCH 037/109] chore: count download attempts, and don't download processed tenures, and clean out completed tenures based on whether or not we see them processed --- .../nakamoto/tenure_downloader_set.rs | 100 ++++++++++++++---- 1 file changed, 78 insertions(+), 22 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 8fa845cfab..4703693a11 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -67,6 +67,33 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct CompletedTenure { + tenure_id: ConsensusHash, + start_block: StacksBlockId, + end_block: StacksBlockId, +} + +impl From<&TenureStartEnd> for CompletedTenure { + fn from(tse: &TenureStartEnd) -> Self { + Self { + tenure_id: tse.tenure_id_consensus_hash.clone(), + start_block: tse.start_block_id.clone(), + end_block: tse.end_block_id.clone(), + } + } +} + +impl From<&mut NakamotoTenureDownloader> for CompletedTenure { + fn from(ntd: &mut NakamotoTenureDownloader) -> Self { + Self { + tenure_id: ntd.tenure_id_consensus_hash, + start_block: ntd.tenure_start_block_id, + end_block: ntd.tenure_end_block_id, + } + } +} + /// A set of confirmed downloader state machines assigned to one or more neighbors. The block /// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure /// needs to feed data into the Nth tenure. 
This struct is responsible for scheduling peer @@ -83,7 +110,9 @@ pub struct NakamotoTenureDownloaderSet { pub(crate) peers: HashMap, /// The set of tenures that have been successfully downloaded (but possibly not yet stored or /// processed) - pub(crate) completed_tenures: HashSet, + pub(crate) completed_tenures: HashSet, + /// Number of times a tenure download was attempted + pub(crate) attempted_tenures: HashMap, } impl NakamotoTenureDownloaderSet { @@ -92,6 +121,7 @@ impl NakamotoTenureDownloaderSet { downloaders: vec![], peers: HashMap::new(), completed_tenures: HashSet::new(), + attempted_tenures: HashMap::new(), } } @@ -180,15 +210,6 @@ impl NakamotoTenureDownloaderSet { cnt } - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. 
pub fn is_empty(&self) -> bool { for downloader_opt in self.downloaders.iter() { @@ -341,11 +362,6 @@ impl NakamotoTenureDownloaderSet { let Some(ch) = schedule.front() else { break; }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure debug!("No neighbors have tenure {}", ch); @@ -382,6 +398,24 @@ impl NakamotoTenureDownloaderSet { debug!("Neighbor {} does not serve tenure {}", &naddr, ch); continue; }; + if tenure_info.processed { + // we already have this tenure + debug!("Already have processed tenure {}", ch); + self.completed_tenures + .remove(&CompletedTenure::from(tenure_info)); + continue; + } + if self + .completed_tenures + .contains(&CompletedTenure::from(tenure_info)) + { + debug!( + "Already successfully downloaded tenure {} ({}-{})", + &ch, &tenure_info.start_block_id, &tenure_info.end_block_id + ); + schedule.pop_front(); + continue; + } let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) @@ -407,7 +441,16 @@ impl NakamotoTenureDownloaderSet { continue; }; + let attempt_count = if let Some(attempt_count) = self.attempted_tenures.get(&ch) { + *attempt_count + } else { + 0 + }; + self.attempted_tenures + .insert(ch.clone(), attempt_count.saturating_add(1)); + info!("Download tenure {}", &ch; + "attempt" => attempt_count.saturating_add(1), "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, @@ -474,7 +517,7 @@ impl NakamotoTenureDownloaderSet { &naddr, &downloader.tenure_id_consensus_hash ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; 
} debug!( @@ -482,7 +525,10 @@ impl NakamotoTenureDownloaderSet { &naddr, &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); + info!( + "Downloader for tenure {} to {} failed; this peer is dead", + &downloader.tenure_id_consensus_hash, &naddr + ); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -523,11 +569,17 @@ impl NakamotoTenureDownloaderSet { let Ok(blocks_opt) = downloader .handle_next_download_response(response) .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); + info!( + "Failed to handle response from {} on tenure {}: {:?}", + &naddr, &downloader.tenure_id_consensus_hash, &e + ); e }) else { - debug!("Failed to handle download response from {}", &naddr); + info!( + "Failed to handle download response from {} on tenure {}", + &naddr, &downloader.tenure_id_consensus_hash + ); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -543,12 +595,16 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { + info!( + "Downloader for tenure {} is finished", + &downloader.tenure_id_consensus_hash + ); debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for tenure {} finished on {}", + &downloader.tenure_id_consensus_hash, &naddr ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; } } From d1bf24f75a22d4f1459e63dd32d497ea65deebe1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 13:02:52 -0400 Subject: [PATCH 038/109] chore: p2p --> relayer channel only needs one slot --- testnet/stacks-node/src/nakamoto_node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7cda49e10d..ff98058053 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -47,7 +47,7 @@ pub mod sign_coordinator; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; -pub const RELAYER_MAX_BUFFER: usize = 100; +pub const RELAYER_MAX_BUFFER: usize = 1; const VRF_MOCK_MINER_KEY: u64 = 1; pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB From 72c9f54a67ac7bc3ec78eb92f6e7025c97235b9d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 13:04:39 -0400 Subject: [PATCH 039/109] chore: pub(crate) visibility to avoid private leakage --- stackslib/src/net/download/nakamoto/tenure_downloader_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 4703693a11..f985a58cad 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -68,7 +68,7 @@ use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] -struct CompletedTenure { +pub(crate) struct CompletedTenure { tenure_id: ConsensusHash, start_block: StacksBlockId, end_block: StacksBlockId, From f5c8b03e2005c949218fe93a711bc964240f8f66 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 31 Oct 2024 11:23:50 -0700 Subject: [PATCH 040/109] feat: setup test to repro #5400 --- testnet/stacks-node/src/tests/signer/v0.rs | 195 +++++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e4a4deea37..ecb3ac46bd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ 
b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5074,6 +5074,201 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_ne!(block_n_2, block_n); } +/// Test a scenario where: +/// We have one miner. During block A, there is a sortition and a TenureChange. +/// Block B is mined, but it does not contain a TenureChange (ie because a +/// new burn block was mined too quickly). +/// Then block C occurs, which does not have a sortition. +#[test] +#[ignore] +fn continue_after_fast_block_no_sortition() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let send_amt = 100; + let send_fee = 180; + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr.clone(), (send_amt + send_fee) * 5)], + ); + let timeout = Duration::from_secs(200); + let _coord_channel = signer_test.running_nodes.coord_channel.clone(); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + .unwrap() + .block_height + }; + + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + info!("------------------------- Mine Normal Tenure -------------------------"); + 
signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // Wait for a new block commit + wait_for(20, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + // 2 because we mined one block in the normal tenure + Ok(commits - commits_before >= 2) + }) + .expect("Timed out waiting for a new block commit"); + + // Make all signers ignore block proposals + let ignoring_signers: Vec<_> = all_signers.iter().cloned().collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(ignoring_signers.clone()); + + // Don't make future block commits + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + let burn_height_before = get_burn_height(); + + let rejections_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + // Mine a new burn block + info!("------------------------- Starting Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "rejections_before" => rejections_before, + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // assure we have a sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + assert!(tip.sortition); + + let burn_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + + info!("----- Waiting for block rejections -----"); + let min_rejections = (num_signers as u64) * 4 / 10; + // Wait until we have some block rejections + wait_for(30, || { + let rejections = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + let rejections_diff = rejections - rejections_before; + Ok(rejections_diff >= 
min_rejections) + }) + .expect("Timed out waiting for block rejections"); + + // Miner another block and ensure there is _no_ sortition + info!("------------------------- Mine another block -------------------------"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + Ok(burn_height > burn_height_before) + }, + ) + .unwrap(); + + // Verify that no Stacks blocks have been mined (signers are ignoring) + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + assert_eq!(stacks_height, stacks_height_before); + + let stacks_height_before = stacks_height; + + info!("----- Enabling signers to approve proposals -----"; + "stacks_height" => stacks_height_before, + ); + + // Allow signers to respond to proposals again + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }) + .expect("Expected a new Stacks block to be mined"); + + let blocks = test_observer::get_blocks(); + // Debug the last 4 blocks + let blocks = blocks.iter().rev().take(4).rev().collect::>(); + for block in blocks { + println!("\n\n"); + info!("Block: {}", serde_json::to_string_pretty(&block).unwrap()); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions.iter().rev() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx != "0x00" { + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = + StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); + info!("Tx: {}", serde_json::to_string_pretty(&parsed).unwrap()); + } + } + } +} + #[test] #[ignore] /// Test 
that we can mine a tenure extend and then continue mining afterwards. From 9361bea0515c68046adc55bd61f5e6fe95213816 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 15:01:35 -0400 Subject: [PATCH 041/109] chore: log attempt failures, and only start as many downloaders as given --- .../nakamoto/tenure_downloader_set.rs | 33 +++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index f985a58cad..8fc9a4afef 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -113,6 +113,8 @@ pub struct NakamotoTenureDownloaderSet { pub(crate) completed_tenures: HashSet, /// Number of times a tenure download was attempted pub(crate) attempted_tenures: HashMap, + /// Number of times a tenure download failed + pub(crate) attempt_failed_tenures: HashMap, } impl NakamotoTenureDownloaderSet { @@ -122,6 +124,17 @@ impl NakamotoTenureDownloaderSet { peers: HashMap::new(), completed_tenures: HashSet::new(), attempted_tenures: HashMap::new(), + attempt_failed_tenures: HashMap::new(), + } + } + + /// Mark a tenure as having failed to download. + /// Implemented statically to appease the borrow checker. 
+ fn mark_failure(attempt_failed_tenures: &mut HashMap, ch: &ConsensusHash) { + if let Some(failures) = attempt_failed_tenures.get_mut(ch) { + *failures = (*failures).saturating_add(1); + } else { + attempt_failed_tenures.insert(ch.clone(), 1); } } @@ -358,7 +371,7 @@ impl NakamotoTenureDownloaderSet { self.clear_finished_downloaders(); self.clear_available_peers(); - while self.inflight() < count { + while self.num_scheduled_downloaders() < count { let Some(ch) = schedule.front() else { break; }; @@ -449,8 +462,16 @@ impl NakamotoTenureDownloaderSet { self.attempted_tenures .insert(ch.clone(), attempt_count.saturating_add(1)); + let attempt_failed_count = + if let Some(attempt_failed_count) = self.attempt_failed_tenures.get(&ch) { + *attempt_failed_count + } else { + 0 + }; + info!("Download tenure {}", &ch; "attempt" => attempt_count.saturating_add(1), + "failed" => attempt_failed_count, "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, @@ -529,6 +550,10 @@ impl NakamotoTenureDownloaderSet { "Downloader for tenure {} to {} failed; this peer is dead", &downloader.tenure_id_consensus_hash, &naddr ); + Self::mark_failure( + &mut self.attempt_failed_tenures, + &downloader.tenure_id_consensus_hash, + ); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -576,10 +601,14 @@ impl NakamotoTenureDownloaderSet { e }) else { - info!( + debug!( "Failed to handle download response from {} on tenure {}", &naddr, &downloader.tenure_id_consensus_hash ); + Self::mark_failure( + &mut self.attempt_failed_tenures, + &downloader.tenure_id_consensus_hash, + ); neighbor_rpc.add_dead(network, &naddr); continue; }; From c88d0e6e8586758f5eb527e06f08681e72c65a25 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 15:18:15 -0400 Subject: [PATCH 042/109] chore: log more downloader diagnostics --- .../download/nakamoto/tenure_downloader_set.rs | 16 
+++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 8fc9a4afef..52835854d1 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -285,7 +285,7 @@ impl NakamotoTenureDownloaderSet { idled.push(naddr.clone()); continue; }; - let Some(downloader) = downloader_opt else { + let Some(downloader) = downloader_opt.as_ref() else { debug!("Remove peer {} for null download {}", &naddr, i); idled.push(naddr.clone()); continue; @@ -307,10 +307,11 @@ impl NakamotoTenureDownloaderSet { /// this up with a call to `clear_available_peers()`. pub fn clear_finished_downloaders(&mut self) { for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { + if downloader_opt + .as_ref() + .map(|dl| dl.is_done()) + .unwrap_or(false) + { *downloader_opt = None; } } @@ -470,8 +471,13 @@ impl NakamotoTenureDownloaderSet { }; info!("Download tenure {}", &ch; + "peer" => %naddr, "attempt" => attempt_count.saturating_add(1), "failed" => attempt_failed_count, + "downloads_scheduled" => %self.num_scheduled_downloaders(), + "downloads_total" => %self.num_downloaders(), + "downloads_max_count" => count, + "downloads_inflight" => self.inflight(), "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, From fa493d5d6ea0c8af2bc53edff7c8ec7a880407a6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 31 Oct 2024 15:38:52 -0400 Subject: [PATCH 043/109] chore: deprioritize unreliable peers --- .../nakamoto/tenure_downloader_set.rs | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs 
b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 52835854d1..c0b64cf5ce 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -94,6 +94,8 @@ impl From<&mut NakamotoTenureDownloader> for CompletedTenure { } } +pub const PEER_DEPRIORITIZATION_TIME: u64 = 60; + /// A set of confirmed downloader state machines assigned to one or more neighbors. The block /// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure /// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer @@ -115,6 +117,9 @@ pub struct NakamotoTenureDownloaderSet { pub(crate) attempted_tenures: HashMap, /// Number of times a tenure download failed pub(crate) attempt_failed_tenures: HashMap, + /// Peers that should be deprioritized because they're dead (maps to when they can be used + /// again) + pub(crate) deprioritized_peers: HashMap, } impl NakamotoTenureDownloaderSet { @@ -125,6 +130,7 @@ impl NakamotoTenureDownloaderSet { completed_tenures: HashSet::new(), attempted_tenures: HashMap::new(), attempt_failed_tenures: HashMap::new(), + deprioritized_peers: HashMap::new(), } } @@ -138,6 +144,18 @@ impl NakamotoTenureDownloaderSet { } } + /// Mark a peer as deprioritized + /// Implemented statically to appease the borrow checker. + fn mark_deprioritized( + deprioritized_peers: &mut HashMap, + peer: &NeighborAddress, + ) { + deprioritized_peers.insert( + peer.clone(), + get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME, + ); + } + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. 
fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { @@ -393,6 +411,15 @@ impl NakamotoTenureDownloaderSet { schedule.pop_front(); continue; }; + if get_epoch_time_secs() < *self.deprioritized_peers.get(&naddr).unwrap_or(&0) { + debug!( + "Peer {} is deprioritized until {}", + &naddr, + self.deprioritized_peers.get(&naddr).unwrap_or(&0) + ); + continue; + } + if self.try_resume_peer(naddr.clone()) { continue; }; @@ -560,6 +587,7 @@ impl NakamotoTenureDownloaderSet { &mut self.attempt_failed_tenures, &downloader.tenure_id_consensus_hash, ); + Self::mark_deprioritized(&mut self.deprioritized_peers, &naddr); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -615,6 +643,7 @@ impl NakamotoTenureDownloaderSet { &mut self.attempt_failed_tenures, &downloader.tenure_id_consensus_hash, ); + Self::mark_deprioritized(&mut self.deprioritized_peers, &naddr); neighbor_rpc.add_dead(network, &naddr); continue; }; From 118cc19c5ee97f4d6bb0165198e16f5623ffe51d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 31 Oct 2024 15:54:57 -0700 Subject: [PATCH 044/109] Use thiserror throughout testnet/stacks-node Signed-off-by: Jacinta Ferrant --- Cargo.lock | 9 ++--- Cargo.toml | 1 + libsigner/Cargo.toml | 2 +- stacks-signer/Cargo.toml | 2 +- testnet/stacks-node/Cargo.toml | 1 + testnet/stacks-node/src/burnchains/mod.rs | 36 ++++++------------- .../stacks-node/src/tests/bitcoin_regtest.rs | 13 ++----- 7 files changed, 22 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 227cd9d768..8a3769b6a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,6 +3328,7 @@ dependencies = [ "stackslib", "stx-genesis", "tempfile", + "thiserror", "tikv-jemallocator", "tiny_http", "tokio", @@ -3592,18 +3593,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 10dc427e2e..c00c223c47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } +thiserror = { version = "1.0.65" } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 63241d3256..7c472365a1 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -30,7 +30,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = "0.12" [dev-dependencies] diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index da94cc10de..139c34fba8 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -38,7 +38,7 @@ slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 958820b491..0c68d22ee7 100644 --- a/testnet/stacks-node/Cargo.toml +++ 
b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } +thiserror = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 0c9446304d..0509993dd0 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -1,7 +1,6 @@ pub mod bitcoin_regtest_controller; pub mod mocknet_controller; -use std::fmt; use std::time::Instant; use stacks::burnchains; @@ -16,41 +15,26 @@ pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, BitcoinRegtestC pub use self::mocknet_controller::MocknetController; use super::operations::BurnchainOpSigner; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum Error { + #[error("ChainsCoordinator closed")] CoordinatorClosed, - IndexerError(burnchains::Error), + #[error("Indexer error: {0}")] + IndexerError(#[from] burnchains::Error), + #[error("Burnchain error")] BurnchainError, + #[error("Max fee rate exceeded")] MaxFeeRateExceeded, + #[error("Identical operation, not submitting")] IdenticalOperation, + #[error("No UTXOs available")] NoUTXOs, + #[error("Transaction submission failed: {0}")] TransactionSubmissionFailed(String), + #[error("Serializer error: {0}")] SerializerError(CodecError), } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), - Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), - Error::BurnchainError => write!(f, "Burnchain error"), - Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), - 
Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUTXOs => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed(e) => { - write!(f, "Transaction submission failed: {e}") - } - Error::SerializerError(e) => write!(f, "Serializer error: {e}"), - } - } -} - -impl From for Error { - fn from(e: burnchains::Error) -> Self { - Error::IndexerError(e) - } -} - pub trait BurnchainController { fn start(&mut self, target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 621f92aa47..90b1310183 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -17,21 +17,14 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum BitcoinCoreError { + #[error("bitcoind spawn failed: {0}")] SpawnFailed(String), + #[error("bitcoind stop failed: {0}")] StopFailed(String), } -impl std::fmt::Display for BitcoinCoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), - Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), - } - } -} - type BitcoinResult = Result; pub struct BitcoinCoreController { From 28c723b64de43c5bd5e8ce9f840d9de9ce65ff56 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 1 Nov 2024 13:34:34 -0500 Subject: [PATCH 045/109] feat: add index for stacks block id in nakamoto_block_headers --- stackslib/src/chainstate/stacks/db/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 160e2dc60e..9996af199a 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -903,6 +903,7 @@ 
const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; From 7d2c13c4fe8e94aff9db807932ea10913ac80a4e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 1 Nov 2024 14:31:52 -0500 Subject: [PATCH 046/109] bump chainstate schema version --- stackslib/src/chainstate/nakamoto/mod.rs | 6 +++++ stackslib/src/chainstate/stacks/db/mod.rs | 29 +++++++++++++++-------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b8d0441591..0b25fb4504 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -289,6 +289,12 @@ lazy_static! 
{ ); "#, ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 1] = [ + r#" + UPDATE db_config SET version = "8"; + "# + ]; } #[cfg(test)] diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 9996af199a..530b2ca6d1 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, NAKAMOTO_CHAINSTATE_SCHEMA_5, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -299,14 +299,14 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 8, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 8, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch25 => version_u32 >= 3 && 
version_u32 <= 8, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, } } } @@ -680,7 +680,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "7"; +pub const CHAINSTATE_VERSION: &'static str = "8"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1133,6 +1133,15 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "7" => { + // migrate to nakamoto 3 + info!( + "Migrating chainstate schema from version 7 to 8: just bump the schema (added indexes)" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", From eae2ce2c46dbeb84e696214e65fba788cb0c6fb4 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 1 Nov 2024 15:59:00 -0400 Subject: [PATCH 047/109] test: additional scenarios for empty sortition tenure extends --- .github/workflows/bitcoin-tests.yml | 2 + testnet/stacks-node/src/tests/signer/v0.rs | 340 ++++++++++++++++++++- 2 files changed, 339 insertions(+), 3 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..456fc27d80 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -99,6 +99,8 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::empty_sortition + - tests::signer::v0::empty_sortition_before_approval + - tests::signer::v0::empty_sortition_before_proposal - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684a..5463d9fd6a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -66,14 +66,16 @@ use tracing_subscriber::{fmt, EnvFilter}; use 
super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::miner::{ + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, +}; use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, + POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, @@ -2591,6 +2593,338 @@ fn empty_sortition() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been approved. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_approval() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let burn_height_before = info.burn_block_height; + let stacks_height_before = info.stacks_tip_height; + + info!("Forcing miner to ignore signatures for next block"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + + info!("Pausing block commits to trigger an empty sortition."); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + info!("------------------------- Test Mine Tenure A -------------------------"); + let proposed_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + // Mine a regular tenure and wait 
for a block proposal + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let proposed_count = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + Ok(proposed_count > proposed_before) + }, + ) + .expect("Failed to mine tenure A and propose a block"); + + info!("------------------------- Test Mine Empty Tenure B -------------------------"); + + // Trigger an empty tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height == burn_height_before + 2) + }, + ) + .expect("Failed to mine empty tenure"); + + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + info!("Stop ignoring signers and wait for the tip to advance"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => { + 
info!("Found block with tenure change"); + } + }, + payload => { + info!("Found tx with payload: {:?}", payload); + } + }; + } + Ok(false) + }) + .expect("Timed out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been proposed. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_proposal() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let stacks_height_before = info.stacks_tip_height; + + info!("Pause block commits to ensure we get an empty sortition"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + info!("Pause miner so it doesn't propose a block before the next tenure arrives"); + TEST_MINE_STALL.lock().unwrap().replace(true); + + info!("------------------------- Test Mine Tenure A and B -------------------------"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(2); + + // Sleep to ensure the signers see both burn blocks + sleep_ms(5_000); + + 
info!("Unpause miner"); + TEST_MINE_STALL.lock().unwrap().replace(false); + + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => { + info!("Found block with tenure change"); + } + }, + payload => { + info!("Found tx with payload: {:?}", payload); + } + }; + } + Ok(false) + }) + .expect("Timed out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip 
with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. From 1074fe0b46ad7125193a3050cabdeecde488fe10 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Fri, 1 Nov 2024 17:20:19 -0400 Subject: [PATCH 048/109] fix: off-by-one error in reward set caching logic in p2p stack --- stackslib/src/net/p2p.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 054fefaf1d..1f5320e60d 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4304,7 +4304,9 @@ impl PeerNetwork { for rc in [cur_rc, prev_rc, prev_prev_rc] { debug!("Refresh reward cycle info for cycle {}", rc); - let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); + // NOTE: + 1 needed because the sortition db indexes anchor blocks at index height 1, + // not 0 + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc) + 1; let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { From 20f2f4d0caaa0d0fad6457d81637340d276b63bf Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 09:33:41 -0500 Subject: [PATCH 049/109] use schema_5 for index creation rather than indices list --- stackslib/src/chainstate/nakamoto/mod.rs | 6 ++++-- stackslib/src/chainstate/stacks/db/mod.rs | 1 - 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0b25fb4504..d88082ae41 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -290,10 +290,12 @@ lazy_static! 
{ "#, ]; - pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 1] = [ + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 2] = [ r#" UPDATE db_config SET version = "8"; - "# + "#, + // Add an index for index block hash in nakamoto block headers + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; } diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 530b2ca6d1..e899be993e 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -903,7 +903,6 @@ const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", - "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; From da557e3f63e1ebd532f43978ea33de19e5544682 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 10:00:51 -0500 Subject: [PATCH 050/109] changelog entry --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe5e200d17..ff5fdd588b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) + ## [3.0.0.0.0] ### Added From 722d01b64a51bbbbd2d12cb3a0ac82098b30a84f Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sat, 2 Nov 2024 14:53:53 -0500 Subject: [PATCH 051/109] chore: clean out migration comments --- stackslib/src/chainstate/stacks/db/mod.rs | 9 +-------- 1 
file changed, 1 insertion(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index e899be993e..6b6f523f88 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1087,28 +1087,24 @@ impl StacksChainState { while db_config.version != CHAINSTATE_VERSION { match db_config.version.as_str() { "1" => { - // migrate to 2 info!("Migrating chainstate schema from version 1 to 2"); for cmd in CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } } "2" => { - // migrate to 3 info!("Migrating chainstate schema from version 2 to 3"); for cmd in CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "3" => { - // migrate to nakamoto 1 info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { tx.execute_batch(cmd)?; } } "4" => { - // migrate to nakamoto 2 info!( "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" ); @@ -1117,14 +1113,12 @@ impl StacksChainState { } } "5" => { - // migrate to nakamoto 3 info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "6" => { - // migrate to nakamoto 3 info!( "Migrating chainstate schema from version 6 to 7: adds signer_stats table" ); @@ -1133,9 +1127,8 @@ impl StacksChainState { } } "7" => { - // migrate to nakamoto 3 info!( - "Migrating chainstate schema from version 7 to 8: just bump the schema (added indexes)" + "Migrating chainstate schema from version 7 to 8: add index for nakamoto block headers" ); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { tx.execute_batch(cmd)?; From 72d45f57e07a6d64b0eba88106676951ab02513c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 2 Nov 2024 14:34:13 -0700 Subject: [PATCH 052/109] Fix clippy in stacks node Signed-off-by: Jacinta Ferrant --- 
stackslib/src/chainstate/stacks/tests/mod.rs | 4 +- .../burnchains/bitcoin_regtest_controller.rs | 187 ++- .../src/burnchains/mocknet_controller.rs | 6 +- testnet/stacks-node/src/chain_data.rs | 53 +- testnet/stacks-node/src/config.rs | 144 +- testnet/stacks-node/src/event_dispatcher.rs | 72 +- testnet/stacks-node/src/globals.rs | 11 +- testnet/stacks-node/src/keychain.rs | 24 +- testnet/stacks-node/src/main.rs | 19 +- testnet/stacks-node/src/nakamoto_node.rs | 8 +- .../stacks-node/src/nakamoto_node/miner.rs | 59 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 6 +- .../stacks-node/src/nakamoto_node/relayer.rs | 51 +- .../src/nakamoto_node/sign_coordinator.rs | 16 +- testnet/stacks-node/src/neon_node.rs | 467 ++++--- testnet/stacks-node/src/node.rs | 68 +- testnet/stacks-node/src/operations.rs | 3 +- .../stacks-node/src/run_loop/boot_nakamoto.rs | 6 +- testnet/stacks-node/src/run_loop/helium.rs | 24 +- testnet/stacks-node/src/run_loop/mod.rs | 11 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 17 +- testnet/stacks-node/src/run_loop/neon.rs | 44 +- testnet/stacks-node/src/stacks_events.rs | 2 +- testnet/stacks-node/src/syncctl.rs | 20 +- testnet/stacks-node/src/tenure.rs | 9 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 36 +- testnet/stacks-node/src/tests/epoch_205.rs | 68 +- testnet/stacks-node/src/tests/epoch_21.rs | 686 ++++------ testnet/stacks-node/src/tests/epoch_22.rs | 275 ++-- testnet/stacks-node/src/tests/epoch_23.rs | 46 +- testnet/stacks-node/src/tests/epoch_24.rs | 206 ++- testnet/stacks-node/src/tests/epoch_25.rs | 8 +- testnet/stacks-node/src/tests/integrations.rs | 397 +++--- testnet/stacks-node/src/tests/mempool.rs | 179 +-- testnet/stacks-node/src/tests/mod.rs | 203 +-- .../src/tests/nakamoto_integrations.rs | 648 ++++----- .../src/tests/neon_integrations.rs | 1195 +++++++---------- testnet/stacks-node/src/tests/signer/mod.rs | 32 +- testnet/stacks-node/src/tests/signer/v0.rs | 271 ++-- testnet/stacks-node/src/tests/stackerdb.rs | 40 +- 40 
files changed, 2392 insertions(+), 3229 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 8b66c019f0..6e6fdfd8f7 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -475,14 +475,14 @@ impl TestStacksNode { }; if StacksChainState::has_stored_block( - &self.chainstate.db(), + self.chainstate.db(), &self.chainstate.blocks_path, &consensus_hash, &bc.block_header_hash, ) .unwrap() && !StacksChainState::is_block_orphaned( - &self.chainstate.db(), + self.chainstate.db(), &consensus_hash, &bc.block_header_hash, ) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 82282926d3..06cc4799ff 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -186,12 +186,11 @@ pub fn make_bitcoin_indexer( let (_, network_type) = config.burnchain.get_bitcoin_network(); let indexer_runtime = BitcoinIndexerRuntime::new(network_type); - let burnchain_indexer = BitcoinIndexer { + BitcoinIndexer { config: indexer_config, runtime: indexer_runtime, - should_keep_running: should_keep_running, - }; - burnchain_indexer + should_keep_running, + } } pub fn get_satoshis_per_byte(config: &Config) -> u64 { @@ -215,7 +214,7 @@ impl LeaderBlockCommitFees { let mut fees = LeaderBlockCommitFees::estimated_fees_from_payload(payload, config); fees.spent_in_attempts = cmp::max(1, self.spent_in_attempts); fees.final_size = self.final_size; - fees.fee_rate = self.fee_rate + get_rbf_fee_increment(&config); + fees.fee_rate = self.fee_rate + get_rbf_fee_increment(config); fees.is_rbf_enabled = true; fees } @@ -306,8 +305,7 @@ impl BitcoinRegtestController { burnchain: Option, should_keep_running: Option>, ) -> Self { - std::fs::create_dir_all(&config.get_burnchain_path_str()) - .expect("Unable 
to create workdir"); + std::fs::create_dir_all(config.get_burnchain_path_str()).expect("Unable to create workdir"); let (_, network_id) = config.burnchain.get_bitcoin_network(); let res = SpvClient::new( @@ -434,11 +432,10 @@ impl BitcoinRegtestController { /// Get the default Burnchain instance from our config fn default_burnchain(&self) -> Burnchain { - let burnchain = match &self.burnchain_config { + match &self.burnchain_config { Some(burnchain) => burnchain.clone(), None => self.config.get_burnchain(), - }; - burnchain + } } /// Get the PoX constants in use @@ -491,7 +488,7 @@ impl BitcoinRegtestController { (None, Some(chain_tip)) => chain_tip.clone(), (Some(state_transition), _) => { let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::from(state_transition), received_at: Instant::now(), }; @@ -501,7 +498,7 @@ impl BitcoinRegtestController { (None, None) => { // can happen at genesis let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::noop(), received_at: Instant::now(), }; @@ -602,8 +599,8 @@ impl BitcoinRegtestController { }; let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, - state_transition: state_transition, + block_snapshot, + state_transition, received_at: Instant::now(), }; @@ -641,11 +638,11 @@ impl BitcoinRegtestController { let filter_addresses = vec![addr2str(&address)]; let pubk = if self.config.miner.segwit { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; test_debug!("Import public key '{}'", &pubk.to_hex()); @@ -753,11 +750,11 @@ impl BitcoinRegtestController { } let pubk = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; // 
Configure UTXO filter @@ -1013,7 +1010,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1026,10 +1023,8 @@ impl BitcoinRegtestController { }; tx.output = vec![consensus_output]; - tx.output.push( - PoxAddress::Standard(payload.recipient.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), - ); + tx.output + .push(PoxAddress::Standard(payload.recipient, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT)); self.finalize_tx( epoch_id, @@ -1099,7 +1094,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1113,8 +1108,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output.push( - PoxAddress::Standard(payload.delegate_to.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), + PoxAddress::Standard(payload.delegate_to, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT), ); self.finalize_tx( @@ -1180,7 +1174,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1271,7 +1265,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output - .push(PoxAddress::Standard(payload.output.clone(), None).to_bitcoin_tx_out(output_amt)); + .push(PoxAddress::Standard(payload.output, None).to_bitcoin_tx_out(output_amt)); self.finalize_tx( epoch_id, @@ -1347,7 +1341,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload 
.consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1388,10 +1382,9 @@ impl BitcoinRegtestController { fn magic_bytes(&self) -> Vec { #[cfg(test)] { - if let Some(set_bytes) = TEST_MAGIC_BYTES + if let Some(set_bytes) = *TEST_MAGIC_BYTES .lock() .expect("FATAL: test magic bytes mutex poisoned") - .clone() { return set_bytes.to_vec(); } @@ -1399,6 +1392,7 @@ impl BitcoinRegtestController { self.config.burnchain.magic_bytes.as_bytes().to_vec() } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation( &mut self, epoch_id: StacksEpochId, @@ -1407,7 +1401,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, previous_fees: Option, - previous_txids: &Vec, + previous_txids: &[Txid], ) -> Result { let _ = self.sortdb_mut(); let burn_chain_tip = self @@ -1433,6 +1427,7 @@ impl BitcoinRegtestController { ) } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation_at_burnchain_height( &mut self, epoch_id: StacksEpochId, @@ -1441,7 +1436,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, mut estimated_fees: LeaderBlockCommitFees, - previous_txids: &Vec, + previous_txids: &[Txid], burnchain_block_height: u64, ) -> Result { let public_key = signer.get_public_key(); @@ -1502,8 +1497,8 @@ impl BitcoinRegtestController { debug!("Transaction relying on UTXOs: {:?}", utxos); let txid = Txid::from_bytes(&txid[..]).unwrap(); - let mut txids = previous_txids.clone(); - txids.push(txid.clone()); + let mut txids = previous_txids.to_vec(); + txids.push(txid); let ongoing_block_commit = OngoingBlockCommit { payload, utxos, @@ -1537,15 +1532,8 @@ impl BitcoinRegtestController { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. 
- let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1574,7 +1562,7 @@ impl BitcoinRegtestController { None, None, None, - &vec![], + &[], ); return res; } else { @@ -1589,13 +1577,13 @@ impl BitcoinRegtestController { .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { - if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { + if burn_chain_tip.block_hash == ongoing_op.utxos.bhh { found_last_mined_at = true; break; } let parent = BurnchainDB::get_burnchain_block( - &burnchain_db.conn(), + burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) .map_err(|_| BurnchainControllerError::BurnchainError)?; @@ -1609,15 +1597,8 @@ impl BitcoinRegtestController { "Possible presence of fork or stale UTXO cache, invalidating cached set of UTXOs."; "cached_burn_block_hash" => %ongoing_op.utxos.bhh, ); - let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1659,7 +1640,7 @@ impl BitcoinRegtestController { None, Some(ongoing_op.utxos.clone()), None, - &vec![], + &[], ) } else { // Case 2) ii): Attempt to RBF @@ -1724,9 +1705,9 @@ impl BitcoinRegtestController { } else { // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); - let utxos = match self.get_utxos( + match self.get_utxos( epoch_id, - &public_key, + public_key, total_required, utxos_to_exclude, block_height, @@ -1741,8 +1722,7 @@ impl BitcoinRegtestController { ); return Err(BurnchainControllerError::NoUTXOs); } - }; - utxos + } }; // Prepare a backbone for the tx @@ -1756,6 +1736,7 @@ impl BitcoinRegtestController { Ok((transaction, utxos)) } + 
#[allow(clippy::too_many_arguments)] fn finalize_tx( &mut self, epoch_id: StacksEpochId, @@ -1884,7 +1865,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. Not adding change address."); } - for (_i, utxo) in utxos_set.utxos.iter().enumerate() { + for utxo in utxos_set.utxos.iter() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -2118,7 +2099,7 @@ impl BitcoinRegtestController { } }; - transaction.map(|tx| SerializedTx::new(tx)) + transaction.map(SerializedTx::new) } #[cfg(test)] @@ -2139,7 +2120,7 @@ impl BitcoinRegtestController { for pk in pks { debug!("Import public key '{}'", &pk.to_hex()); - if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, pk) { warn!("Error when importing pubkey: {e:?}"); } } @@ -2165,7 +2146,7 @@ impl BitcoinRegtestController { // otherwise, round robin generate blocks for i in 0..num_blocks { - let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let pk = &pks[i % pks.len()]; let address = self.get_miner_address(StacksEpochId::Epoch21, pk); if i < pks.len() { debug!( @@ -2249,10 +2230,7 @@ impl BurnchainController for BitcoinRegtestController { target_block_height_opt: Option, ) -> Result<(BurnchainTip, u64), BurnchainControllerError> { // if no target block height is given, just fetch the first burnchain block. 
- self.receive_blocks( - false, - target_block_height_opt.map_or_else(|| Some(1), |x| Some(x)), - ) + self.receive_blocks(false, target_block_height_opt.map_or_else(|| Some(1), Some)) } fn sync( @@ -2351,13 +2329,13 @@ impl SerializedTx { } pub fn txid(&self) -> Txid { - self.txid.clone() + self.txid } pub fn to_hex(&self) -> String { let formatted_bytes: Vec = self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); - format!("{}", formatted_bytes.join("")) + formatted_bytes.join("").to_string() } } @@ -2419,7 +2397,7 @@ impl ParsedUTXO { } (lhs, rhs) => { warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); - return None; + None } } } @@ -2516,13 +2494,12 @@ impl BitcoinRPCRequest { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match (&config.burnchain.username, &config.burnchain.password) { - (Some(username), Some(password)) => { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - request.add_header("Authorization".into(), auth_token); - } - (_, _) => {} - }; + if let (Some(username), Some(password)) = + (&config.burnchain.username, &config.burnchain.password) + { + let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); + request.add_header("Authorization".into(), auth_token); + } request } @@ -2535,7 +2512,7 @@ impl BitcoinRPCRequest { id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; debug!("Got raw transaction {}: {:?}", txid, &res); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2548,7 +2525,7 @@ impl BitcoinRPCRequest { id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; let confirmations = res .get("result") .ok_or_else(|| 
RPCError::Parsing("No 'result' field in bitcoind RPC response".into()))? @@ -2575,7 +2552,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; debug!( "Generated {} blocks to {}: {:?}", num_blocks, &address, &res @@ -2598,21 +2575,17 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; - let bhh = match res.as_object_mut() { - Some(res) => { - let res = res - .get("result") - .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh: String = serde_json::from_value(res.to_owned()) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh = BurnchainHeaderHash::from_hex(&bhh) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - bhh - } - _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), + let mut res = BitcoinRPCRequest::send(config, payload)?; + let Some(res) = res.as_object_mut() else { + return Err(RPCError::Parsing("Failed to get UTXOs".to_string())); }; - + let res = res + .get("result") + .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh_string: String = serde_json::from_value(res.to_owned()) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh = BurnchainHeaderHash::from_hex(&bhh_string) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); @@ -2630,7 +2603,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let txids_to_filter = if let Some(utxos_to_exclude) = utxos_to_exclude { utxos_to_exclude .utxos @@ -2710,7 +2683,7 @@ impl 
BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let json_resp = BitcoinRPCRequest::send(&config, payload)?; + let json_resp = BitcoinRPCRequest::send(config, payload)?; if let Some(e) = json_resp.get("error") { if !e.is_null() { @@ -2756,9 +2729,9 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let result = BitcoinRPCRequest::send(&config, payload)?; + let result = BitcoinRPCRequest::send(config, payload)?; let checksum = result - .get(&"result".to_string()) + .get("result") .and_then(|res| res.as_object()) .and_then(|obj| obj.get("checksum")) .and_then(|checksum_val| checksum_val.as_str()) @@ -2776,7 +2749,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; } Ok(()) } @@ -2790,7 +2763,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let mut wallets = Vec::new(); match res.as_object_mut() { Some(ref mut object) => match object.get_mut("result") { @@ -2828,12 +2801,12 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; Ok(()) } pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let request = BitcoinRPCRequest::build_rpc_request(config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); let host = request.preamble().host.hostname(); @@ -2841,9 +2814,9 @@ impl BitcoinRPCRequest { let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { - return Ok(js); + Ok(js) } else { - return Err(RPCError::Parsing("Did not get a JSON response".into())); + Err(RPCError::Parsing("Did not get a JSON response".into())) } } } @@ -3025,7 +2998,7 @@ 
mod tests { Some(utxo_set), None, leader_fees, - &vec![], + &[], 2212, ) .unwrap(); diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index d518f5bdea..a626cfb443 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -44,8 +44,8 @@ impl MocknetController { let burnchain = config.get_burnchain(); Self { - config: config, - burnchain: burnchain, + config, + burnchain, db: None, queued_operations: VecDeque::new(), chain_tip: None, @@ -54,7 +54,7 @@ impl MocknetController { fn build_next_block_header(current_block: &BlockSnapshot) -> BurnchainBlockHeader { let curr_hash = ¤t_block.burn_header_hash.to_bytes()[..]; - let next_hash = Sha256Sum::from_data(&curr_hash); + let next_hash = Sha256Sum::from_data(curr_hash); let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( current_block.block_height + 1, diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index b1e32c15ea..c7fdaf6cee 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -81,7 +81,7 @@ impl MinerStats { { commits_at_sortition.push(missed); } else { - missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]); + missed_commits_map.insert(missed.intended_sortition, vec![missed]); } } @@ -106,8 +106,7 @@ impl MinerStats { &sortition_id, )?; if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) { - missed_commits_at_height - .extend(missed_commit_in_block.into_iter().map(|x| x.clone())); + missed_commits_at_height.extend(missed_commit_in_block.into_iter().cloned()); } windowed_missed_commits.push(missed_commits_at_height); @@ -223,7 +222,7 @@ impl MinerStats { all_miners: &[&str], ) -> Result, String> { let (exit_code, stdout, _stderr) = - Self::run_subprocess(&self.unconfirmed_commits_helper, &all_miners)?; + 
Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( "Failed to run `{}`: exit code {}", @@ -255,7 +254,7 @@ impl MinerStats { }; let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { - let Ok(pox_addr_bytes) = hex_bytes(&pox_addr_hex) else { + let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); }; let Some(bitcoin_addr) = @@ -279,8 +278,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 1, parent_vtxindex: 1, key_block_ptr: 1, @@ -295,7 +294,7 @@ impl MinerStats { block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; unconfirmed_spends.push(mocked_commit); @@ -306,7 +305,7 @@ impl MinerStats { /// Convert a list of burn sample points into a probability distribution by candidate's /// apparent sender (e.g. miner address). 
pub fn burn_dist_to_prob_dist(burn_dist: &[BurnSamplePoint]) -> HashMap { - if burn_dist.len() == 0 { + if burn_dist.is_empty() { return HashMap::new(); } if burn_dist.len() == 1 { @@ -343,13 +342,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -391,7 +388,7 @@ impl MinerStats { let (dist, total_spend) = Self::get_spend_distribution( active_miners_and_commits, unconfirmed_block_commits, - &expected_pox_addrs, + expected_pox_addrs, ); let mut probs = HashMap::new(); @@ -444,8 +441,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 2, parent_vtxindex: 2, key_block_ptr: 2, @@ -455,13 +452,13 @@ impl MinerStats { burn_fee: last_commit.burn_fee, input: (last_commit.txid, expected_input_index), apparent_sender: last_commit.apparent_sender.clone(), - txid: Txid(DEADBEEF.clone()), + txid: Txid(DEADBEEF), vtxindex: 1, block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; commit_table.insert(miner.to_string(), mocked_commit); } @@ -473,13 +470,11 @@ impl MinerStats { if commit.commit_outs.len() != 
expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -520,9 +515,7 @@ impl MinerStats { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id)?; for commit in commits.into_iter() { let miner = commit.apparent_sender.to_string(); - if miners.get(&miner).is_none() { - miners.insert(miner, commit); - } + miners.entry(miner).or_insert(commit); } tip = SortitionDB::get_block_snapshot(sortdb.conn(), &tip.parent_sortition_id)? .ok_or(DBError::NotFoundError)?; @@ -750,11 +743,11 @@ echo < Result { let mut config: ConfigFile = toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; @@ -367,7 +368,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; - return config.miner; + config.miner } pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { @@ -380,7 +381,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { return self.node.clone(); }; - return config.node; + config.node } /// Apply any test settings to this burnchain config struct @@ -411,7 +412,7 @@ impl Config { "Override first_burn_block_hash from {} to {}", burnchain.first_block_hash, first_burn_block_hash ); - burnchain.first_block_hash = BurnchainHeaderHash::from_hex(&first_burn_block_hash) + burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid first_burn_block_hash"); } @@ -525,7 +526,7 @@ impl Config { } // check if the 
Epoch 3.0 burnchain settings as configured are going to be valid. - self.check_nakamoto_config(&burnchain); + self.check_nakamoto_config(burnchain); } fn check_nakamoto_config(&self, burnchain: &Burnchain) { @@ -612,7 +613,7 @@ impl Config { let _ = StacksEpoch::validate_epochs(epochs); // sanity check: v1_unlock_height must happen after pox-2 instantiation - let epoch21_index = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch21) + let epoch21_index = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch21) .expect("FATAL: no epoch 2.1 defined"); let epoch21 = &epochs[epoch21_index]; @@ -810,7 +811,7 @@ impl Config { } if burnchain.mode == "helium" && burnchain.local_mining_public_key.is_none() { - return Err(format!("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)")); + return Err("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)".into()); } let is_mainnet = burnchain.mode == "mainnet"; @@ -834,27 +835,17 @@ impl Config { burnchain.peer_version, ); } - } else { - if is_mainnet && resolve_bootstrap_nodes { - let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); - node.set_bootstrap_nodes( - bootstrap_node, - burnchain.chain_id, - burnchain.peer_version, - ); - } + } else if is_mainnet && resolve_bootstrap_nodes { + let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); + node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); } if let Some(deny_nodes) = deny_nodes { node.set_deny_nodes(deny_nodes, burnchain.chain_id, burnchain.peer_version); } // Validate the node config - if is_mainnet { - if node.use_test_genesis_chainstate == Some(true) { - return Err(format!( - "Attempted to run mainnet node with `use_test_genesis_chainstate`" - )); - } + if is_mainnet && node.use_test_genesis_chainstate == Some(true) { + return Err("Attempted to run mainnet node with 
`use_test_genesis_chainstate`".into()); } if node.stacker || node.miner { @@ -869,10 +860,10 @@ impl Config { let initial_balances: Vec = match config_file.ustx_balance { Some(balances) => { - if is_mainnet && balances.len() > 0 { - return Err(format!( - "Attempted to run mainnet node with specified `initial_balances`" - )); + if is_mainnet && !balances.is_empty() { + return Err( + "Attempted to run mainnet node with specified `initial_balances`".into(), + ); } balances .iter() @@ -913,16 +904,12 @@ impl Config { }; // check for observer config in env vars - match std::env::var("STACKS_EVENT_OBSERVER") { - Ok(val) => { - events_observers.insert(EventObserverConfig { - endpoint: val, - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1_000, - }); - () - } - _ => (), + if let Ok(val) = std::env::var("STACKS_EVENT_OBSERVER") { + events_observers.insert(EventObserverConfig { + endpoint: val, + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, + }); }; let connection_options = match config_file.connection_options { @@ -1070,14 +1057,11 @@ impl Config { } pub fn is_mainnet(&self) -> bool { - match self.burnchain.mode.as_str() { - "mainnet" => true, - _ => false, - } + matches!(self.burnchain.mode.as_str(), "mainnet") } pub fn is_node_event_driven(&self) -> bool { - self.events_observers.len() > 0 + !self.events_observers.is_empty() } pub fn make_nakamoto_block_builder_settings( @@ -1157,12 +1141,11 @@ impl Config { /// part dependent on the state machine getting block data back to the miner quickly, and thus /// the poll time is dependent on the first attempt time. 
pub fn get_poll_time(&self) -> u64 { - let poll_timeout = if self.node.miner { + if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { 1000 - }; - poll_timeout + } } } @@ -1253,7 +1236,7 @@ impl BurnchainConfig { username: None, password: None, timeout: 60, - magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), + magic_bytes: BLOCKSTACK_MAGIC_MAINNET, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: 10, // TODO: this is a testnet specific value. @@ -1298,8 +1281,7 @@ impl BurnchainConfig { let mut addrs_iter = format!("{}:{}", self.peer_host, self.rpc_port) .to_socket_addrs() .unwrap(); - let sock_addr = addrs_iter.next().unwrap(); - sock_addr + addrs_iter.next().unwrap() } pub fn get_bitcoin_network(&self) -> (String, BitcoinNetworkType) { @@ -1320,15 +1302,15 @@ pub struct StacksEpochConfigFile { start_height: i64, } -pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; -pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; -pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; -pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; -pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; -pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; -pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; -pub const EPOCH_CONFIG_2_5_0: &'static str = "2.5"; -pub const EPOCH_CONFIG_3_0_0: &'static str = "3.0"; +pub const EPOCH_CONFIG_1_0_0: &str = "1.0"; +pub const EPOCH_CONFIG_2_0_0: &str = "2.0"; +pub const EPOCH_CONFIG_2_0_5: &str = "2.05"; +pub const EPOCH_CONFIG_2_1_0: &str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; +pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; +pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -1978,9 +1960,8 @@ impl NodeConfig { /// Get a SocketAddr for this node's RPC endpoint which uses the loopback address pub fn 
get_rpc_loopback(&self) -> Option { let rpc_port = SocketAddr::from_str(&self.rpc_bind) - .or_else(|e| { + .map_err(|e| { error!("Could not parse node.rpc_bind configuration setting as SocketAddr: {e}"); - Err(()) }) .ok()? .port(); @@ -2090,8 +2071,8 @@ impl NodeConfig { peer_version: u32, ) { for part in bootstrap_nodes.split(',') { - if part.len() > 0 { - self.add_bootstrap_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_bootstrap_node(part, chain_id, peer_version); } } } @@ -2109,8 +2090,8 @@ impl NodeConfig { pub fn set_deny_nodes(&mut self, deny_nodes: String, chain_id: u32, peer_version: u32) { for part in deny_nodes.split(',') { - if part.len() > 0 { - self.add_deny_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_deny_node(part, chain_id, peer_version); } } } @@ -2124,10 +2105,7 @@ impl NodeConfig { MARFOpenOpts::new( hash_mode, - &self - .marf_cache_strategy - .as_ref() - .unwrap_or(&"noop".to_string()), + self.marf_cache_strategy.as_deref().unwrap_or("noop"), false, ) } @@ -2288,21 +2266,21 @@ impl ConnectionOptionsFile { let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS .read_only_call_limit .clone(); - self.read_only_call_limit_write_length.map(|x| { + if let Some(x) = self.read_only_call_limit_write_length { read_only_call_limit.write_length = x; - }); - self.read_only_call_limit_write_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_write_count { read_only_call_limit.write_count = x; - }); - self.read_only_call_limit_read_length.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_length { read_only_call_limit.read_length = x; - }); - self.read_only_call_limit_read_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_count { read_only_call_limit.read_count = x; - }); - self.read_only_call_limit_runtime.map(|x| { + } + if let Some(x) = self.read_only_call_limit_runtime { read_only_call_limit.runtime = x; - }); + }; let default = 
ConnectionOptions::default(); Ok(ConnectionOptions { read_only_call_limit, @@ -2353,7 +2331,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.soft_max_clients_per_host), walk_interval: self .walk_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval), walk_seed_probability: self .walk_seed_probability .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), @@ -2375,7 +2353,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.maximum_call_argument_size), download_interval: self .download_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval), inv_sync_interval: self .inv_sync_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_sync_interval), @@ -2396,7 +2374,7 @@ impl ConnectionOptionsFile { force_disconnect_interval: self.force_disconnect_interval, max_http_clients: self .max_http_clients - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients), connect_timeout: self.connect_timeout.unwrap_or(10), handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, @@ -2457,7 +2435,7 @@ impl NodeConfigFile { name: self.name.unwrap_or(default_node_config.name), seed: match self.seed { Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.seed should be a hex encoded string"))?, + .map_err(|_e| "node.seed should be a hex encoded string".to_string())?, None => default_node_config.seed, }, working_dir: std::env::var("STACKS_WORKING_DIR") @@ -2471,8 +2449,9 @@ impl NodeConfigFile { .data_url .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { - Some(seed) => 
hex_bytes(&seed) - .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, + Some(seed) => hex_bytes(&seed).map_err(|_e| { + "node.local_peer_seed should be a hex encoded string".to_string() + })?, None => default_node_config.local_peer_seed, }, miner, @@ -2527,7 +2506,7 @@ impl NodeConfigFile { .unwrap_or(default_node_config.chain_liveness_poll_time_secs), stacker_dbs: self .stacker_dbs - .unwrap_or(vec![]) + .unwrap_or_default() .iter() .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok()) .collect(), @@ -2714,6 +2693,7 @@ pub struct AtlasConfigFile { impl AtlasConfigFile { // Can't inplement `Into` trait because this takes a parameter + #[allow(clippy::wrong_self_convention)] fn into_config(&self, mainnet: bool) -> AtlasConfig { let mut conf = AtlasConfig::new(mainnet); if let Some(val) = self.attachments_max_size { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index bb05cd6128..dd587077a6 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -181,6 +181,12 @@ impl InnerStackerDBChannel { } } +impl Default for StackerDBChannel { + fn default() -> Self { + Self::new() + } +} + impl StackerDBChannel { pub const fn new() -> Self { Self { @@ -256,7 +262,7 @@ where serializer.serialize_str(&value.to_string()) } -fn serialize_pox_addresses(value: &Vec, serializer: S) -> Result +fn serialize_pox_addresses(value: &[PoxAddress], serializer: S) -> Result where S: serde::Serializer, { @@ -402,8 +408,8 @@ impl EventObserver { let id: i64 = row.get(0)?; let url: String = row.get(1)?; let payload_text: String = row.get(2)?; - let payload: serde_json::Value = serde_json::from_str(&payload_text) - .map_err(|e| db_error::SerializationError(e))?; + let payload: serde_json::Value = + serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; let timeout_ms: u64 = row.get(3)?; Ok((id, url, payload, 
timeout_ms)) }, @@ -642,7 +648,7 @@ impl EventObserver { TransactionOrigin::Burn(op) => ( op.txid().to_string(), "00".to_string(), - BlockstackOperationType::blockstack_op_to_json(&op), + BlockstackOperationType::blockstack_op_to_json(op), ), TransactionOrigin::Stacks(ref tx) => { let txid = tx.txid().to_string(); @@ -776,6 +782,7 @@ impl EventObserver { self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); } + #[allow(clippy::too_many_arguments)] fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, @@ -806,12 +813,15 @@ impl EventObserver { }) .collect(); - let mut tx_index: u32 = 0; let mut serialized_txs = vec![]; - for receipt in receipts.iter() { - let payload = EventObserver::make_new_block_txs_payload(receipt, tx_index); + for (tx_index, receipt) in receipts.iter().enumerate() { + let payload = EventObserver::make_new_block_txs_payload( + receipt, + tx_index + .try_into() + .expect("BUG: more receipts than U32::MAX"), + ); serialized_txs.push(payload); - tx_index += 1; } let signer_bitvec_value = signer_bitvec_opt @@ -821,7 +831,7 @@ impl EventObserver { let (reward_set_value, cycle_number_value) = match &reward_set_data { Some(data) => ( - serde_json::to_value(&RewardSetEventPayload::from_reward_set(&data.reward_set)) + serde_json::to_value(RewardSetEventPayload::from_reward_set(&data.reward_set)) .unwrap_or_default(), serde_json::to_value(data.cycle_number).unwrap_or_default(), ), @@ -1097,6 +1107,12 @@ impl BlockEventDispatcher for EventDispatcher { } } +impl Default for EventDispatcher { + fn default() -> Self { + EventDispatcher::new() + } +} + impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { @@ -1125,7 +1141,7 @@ impl EventDispatcher { ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.burn_block_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } 
@@ -1149,6 +1165,7 @@ impl EventDispatcher { /// - dispatch_matrix: a vector where each index corresponds to the hashset of event indexes /// that each respective event observer is subscribed to /// - events: a vector of all events from all the tx receipts + #[allow(clippy::type_complexity)] fn create_dispatch_matrix_and_event_vector<'a>( &self, receipts: &'a Vec, @@ -1241,6 +1258,7 @@ impl EventDispatcher { (dispatch_matrix, events) } + #[allow(clippy::too_many_arguments)] pub fn process_chain_tip( &self, block: &StacksBlockEventData, @@ -1264,7 +1282,7 @@ impl EventDispatcher { let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); - if dispatch_matrix.len() > 0 { + if !dispatch_matrix.is_empty() { let mature_rewards_vec = if let Some(rewards_info) = mature_rewards_info { mature_rewards .iter() @@ -1297,7 +1315,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - &block, + block, metadata, receipts, parent_index_hash, @@ -1342,7 +1360,7 @@ impl EventDispatcher { ) }) .collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let flattened_receipts = processed_unconfirmed_state @@ -1390,12 +1408,12 @@ impl EventDispatcher { .enumerate() .filter_map(|(obs_id, observer)| { let lookup_ix = u16::try_from(obs_id).expect("FATAL: more than 2^16 observers"); - if lookup.contains(&lookup_ix) { - return Some(observer); - } else if include_any && self.any_event_observers_lookup.contains(&lookup_ix) { - return Some(observer); + if lookup.contains(&lookup_ix) + || (include_any && self.any_event_observers_lookup.contains(&lookup_ix)) + { + Some(observer) } else { - return None; + None } }) .collect() @@ -1405,7 +1423,7 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, 
true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1427,7 +1445,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1456,7 +1474,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.mined_microblocks_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1483,7 +1501,7 @@ impl EventDispatcher { tx_events: Vec, ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1502,7 +1520,7 @@ impl EventDispatcher { block_size: block_size_bytes, cost: consumed.clone(), tx_events, - miner_signature: block.header.miner_signature.clone(), + miner_signature: block.header.miner_signature, signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1558,7 +1576,7 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1577,9 +1595,9 @@ impl EventDispatcher { } } - pub fn process_new_attachments(&self, attachments: &Vec<(AttachmentInstance, Attachment)>) { + pub fn process_new_attachments(&self, attachments: &[(AttachmentInstance, Attachment)]) { let interested_observers: Vec<_> = self.registered_observers.iter().enumerate().collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1598,7 +1616,7 @@ impl EventDispatcher { &self, asset_identifier: &AssetIdentifier, event_index: usize, - dispatch_matrix: &mut Vec>, + dispatch_matrix: &mut [HashSet], 
) { if let Some(observer_indexes) = self.assets_observers_lookup.get(asset_identifier) { for o_i in observer_indexes { diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b1ddf2e82b..3e527e76e4 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -23,6 +23,7 @@ use crate::TipCandidate; pub type NeonGlobals = Globals; /// Command types for the relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -99,6 +100,7 @@ impl Clone for Globals { } impl Globals { + #[allow(clippy::too_many_arguments)] pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, @@ -289,8 +291,8 @@ impl Globals { let active_key = RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: op.block_height, + op_vtxindex: op.vtxindex, memo: op.memo, }; @@ -450,10 +452,7 @@ impl Globals { /// Clear the initiative flag and return its value pub fn take_initiative(&self) -> Option { match self.initiative.lock() { - Ok(mut initiative) => { - let ret = (*initiative).take(); - ret - } + Ok(mut initiative) => (*initiative).take(), Err(_e) => { error!("FATAL: failed to lock initiative"); panic!(); diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index b6df8549c4..9402ebbad5 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -123,10 +123,7 @@ impl Keychain { let proof = VRF::prove(&sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(&pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); proof } @@ -178,7 +175,7 
@@ impl Keychain { } /// Sign a transaction as if we were the origin - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let sk = self.get_secret_key(); tx_signer .sign_origin(&sk) @@ -333,7 +330,7 @@ mod tests { } }; sk.set_compress_public(true); - self.microblocks_secret_keys.push(sk.clone()); + self.microblocks_secret_keys.push(sk); debug!("Microblock keypair rotated"; "burn_block_height" => %burn_block_height, @@ -346,7 +343,7 @@ mod tests { self.microblocks_secret_keys.last().cloned() } - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.secret_keys.len() < self.threshold as usize { self.secret_keys.len() } else { @@ -370,12 +367,9 @@ mod tests { }; // Generate the proof - let proof = VRF::prove(&vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(vrf_pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); Some(proof) } @@ -385,7 +379,7 @@ mod tests { let public_keys = self .secret_keys .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) + .map(StacksPublicKey::from_private) .collect(); let version = if is_mainnet { self.hash_mode.to_version_mainnet() @@ -518,7 +512,7 @@ mod tests { TransactionVersion::Testnet, k1.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), @@ -527,7 +521,7 @@ mod tests { TransactionVersion::Testnet, k2.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), diff --git 
a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index fcdc9f5847..e795101c94 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -93,18 +93,18 @@ fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCan at_stacks_height, ); - let best_tip = BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap(); - best_tip + BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap() } /// Implementation of `get_miner_spend` CLI option +#[allow(clippy::incompatible_msrv)] fn cli_get_miner_spend( config_path: &str, mine_start: Option, at_burnchain_height: Option, ) -> u64 { info!("Loading config at path {}", config_path); - let config = match ConfigFile::from_path(&config_path) { + let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { warn!("Invalid config file: {}", e); @@ -155,7 +155,7 @@ fn cli_get_miner_spend( &config, &keychain, &burnchain, - &mut sortdb, + &sortdb, &commit_outs, mine_start.unwrap_or(tip.block_height), at_burnchain_height, @@ -171,7 +171,7 @@ fn cli_get_miner_spend( else { return 0.0; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return 0.0; } @@ -207,12 +207,11 @@ fn cli_get_miner_spend( ); let win_probs = if config.miner.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, &commit_outs, - ); - win_probs + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -229,8 +228,7 @@ fn cli_get_miner_spend( return 0.0; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; 
info!("Unconfirmed spend distribution: {:?}", &spend_dist); @@ -428,7 +426,6 @@ fn main() { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { warn!("Helium runloop exited: {}", e); - return; } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7cda49e10d..ecf37ae0ec 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -148,7 +148,7 @@ impl StacksNode { let burnchain = runloop.get_burnchain(); let atlas_config = config.atlas.clone(); let mut keychain = Keychain::default(config.node.seed.clone()); - if let Some(mining_key) = config.miner.mining_key.clone() { + if let Some(mining_key) = config.miner.mining_key { keychain.set_nakamoto_sk(mining_key); } @@ -195,7 +195,7 @@ impl StacksNode { match &data_from_neon.leader_key_registration_state { LeaderKeyRegistrationState::Active(registered_key) => { let pubkey_hash = keychain.get_nakamoto_pkh(); - if pubkey_hash.as_ref() == ®istered_key.memo { + if pubkey_hash.as_ref() == registered_key.memo { data_from_neon.leader_key_registration_state } else { LeaderKeyRegistrationState::Inactive @@ -366,7 +366,7 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) return; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { warn!("Failed to create {}: {:?}", &path, &e); @@ -374,7 +374,7 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + if let Err(e) = f.write_all(key_json.as_bytes()) { warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); return; } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 150762e965..042df70be1 100644 --- 
a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -67,6 +67,7 @@ pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync:: /// miner thread sleep before trying again? const ABORT_TRY_AGAIN_MS: u64 = 200; +#[allow(clippy::large_enum_variant)] pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { @@ -424,7 +425,7 @@ impl BlockMinerThread { // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); - if !self.last_block_mined.is_none() { + if self.last_block_mined.is_some() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } @@ -573,12 +574,12 @@ impl BlockMinerThread { &self.burnchain, &sort_db, &mut chain_state, - &stackerdbs, + stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, )?; - return Ok((reward_set, signature)); + Ok((reward_set, signature)) } /// Fault injection -- possibly fail to broadcast @@ -590,13 +591,12 @@ impl BlockMinerThread { .fault_injection_block_push_fail_probability .unwrap_or(0) .min(100); - let will_drop = if drop_prob > 0 { + if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob } else { false - }; - will_drop + } } /// Store a block to the chainstate, and if successful (it should be since we mined it), @@ -621,7 +621,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &chainstate_config, - &block, + block, &mut sortition_handle, &staging_tx, headers_conn, @@ -704,7 +704,7 @@ impl BlockMinerThread { miner_privkey, &sort_db, &self.burn_block, - &stackerdbs, + stackerdbs, SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, @@ -869,24 +869,21 @@ impl BlockMinerThread { "Stacks block parent ID may be an epoch2x block: {}", 
&self.parent_tenure_id ); - let epoch2_header = - NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "No header info for epoch2x tenure block ID {}", - &self.parent_tenure_id - ); - NakamotoNodeError::ParentNotFound - })?; - - epoch2_header + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {:?}", + &self.parent_tenure_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? } }; @@ -1147,9 +1144,9 @@ impl BlockMinerThread { let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { - tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), + tenure_consensus_hash: self.burn_election_block.consensus_hash, prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), + burn_view_consensus_hash: self.burn_election_block.consensus_hash, previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), @@ -1252,7 +1249,7 @@ impl ParentStacksBlockInfo { } let Ok(Some(parent_tenure_header)) = - NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + NakamotoChainState::get_block_header(chain_state.db(), parent_tenure_id) else { warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); return Err(NakamotoNodeError::ParentNotFound); @@ -1293,7 +1290,7 @@ impl 
ParentStacksBlockInfo { } else { 1 }; - let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash; Some(ParentTenureInfo { parent_tenure_blocks, parent_tenure_consensus_hash, @@ -1321,7 +1318,7 @@ impl ParentStacksBlockInfo { let account = chain_state .with_read_only_clarity_tx( &burn_db - .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .index_handle_at_block(chain_state, &stacks_tip_header.index_block_hash()) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 004023ea26..78deb69b9f 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -227,6 +227,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub(crate) fn run_one_pass( &mut self, indexer: &B, @@ -238,7 +239,7 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = !self.results_with_data.is_empty(); let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( @@ -258,7 +259,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. 
let handler_args = RPCHandlerArgs { - exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -266,7 +267,6 @@ impl PeerThread { cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), coord_comms: Some(&self.globals.coord_comms), - ..RPCHandlerArgs::default() }; self.net.run( indexer, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..441d7ecd2c 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -67,6 +67,7 @@ use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; /// Command types for the Nakamoto relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -142,7 +143,7 @@ impl LastCommit { /// What's the parent tenure's tenure-start block hash? pub fn parent_tenure_id(&self) -> StacksBlockId { - StacksBlockId(self.block_commit.block_header_hash.clone().0) + StacksBlockId(self.block_commit.block_header_hash.0) } /// What's the stacks tip at the time of commit? @@ -167,7 +168,7 @@ impl LastCommit { /// Set our txid pub fn set_txid(&mut self, txid: &Txid) { - self.txid = Some(txid.clone()); + self.txid = Some(*txid); } } @@ -302,6 +303,8 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
+ #[allow(clippy::nonminimal_bool)] + #[allow(clippy::eq_op)] fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes @@ -497,7 +500,7 @@ impl RelayerThread { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: miner_pkh.as_bytes().to_vec(), - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -564,7 +567,7 @@ impl RelayerThread { let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), &stacks_tip, - &tip_block_ch, + tip_block_ch, ) .map_err(|e| { error!( @@ -730,9 +733,7 @@ impl RelayerThread { /// * last_burn_block corresponds to the canonical sortition DB's chain tip /// * the time of issuance is sufficiently recent /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) + /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed) /// * a miner thread is not running already fn create_block_miner( &mut self, @@ -750,11 +751,11 @@ impl RelayerThread { return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = burn_tip.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( @@ -1067,7 +1068,7 @@ 
impl RelayerThread { // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let res = self.bitcoin_controller.submit_operation( - last_committed.get_epoch_id().clone(), + *last_committed.get_epoch_id(), BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), &mut op_signer, 1, @@ -1299,7 +1300,7 @@ impl RelayerThread { let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { saved_key_opt = - Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); + Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh()); } if let Some(saved_key) = saved_key_opt { debug!("Relayer: resuming VRF key"); @@ -1371,9 +1372,9 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/does_not_exist.json"; - _ = std::fs::remove_file(&path); + _ = std::fs::remove_file(path); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); } @@ -1384,13 +1385,13 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/empty.json"; - File::create(&path).expect("Failed to create test file"); - assert!(Path::new(&path).exists()); + File::create(path).expect("Failed to create test file"); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1403,15 +1404,15 @@ pub mod test { let json_content = r#"{ "hello": "world" }"#; // Write the JSON content to the file - let mut file = File::create(&path).expect("Failed to create test file"); + let mut file = File::create(path).expect("Failed to create test file"); 
file.write_all(json_content.as_bytes()) .expect("Failed to write to test file"); - assert!(Path::new(&path).exists()); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1432,10 +1433,10 @@ pub mod test { let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_some()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1460,9 +1461,9 @@ pub mod test { let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); let pubkey_hash = Hash160::from_node_public_key(&pk); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 697dddeb03..b2f892e1f1 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -188,6 +188,7 @@ impl SignCoordinator { } /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] pub fn send_miners_message( miner_sk: &StacksPrivateKey, sortdb: &SortitionDB, @@ -199,7 +200,7 @@ impl SignCoordinator { miners_session: &mut StackerDBSession, election_sortition: &ConsensusHash, ) -> Result<(), 
String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -222,7 +223,7 @@ impl SignCoordinator { .saturating_add(1); let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); chunk - .sign(&miner_sk) + .sign(miner_sk) .map_err(|_| "Failed to sign StackerDB chunk")?; match miners_session.put_chunk(&chunk) { @@ -270,13 +271,14 @@ impl SignCoordinator { /// to the signers, and then waits for the signers to respond /// with their signatures. It does so in two ways, concurrently: /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. + /// found, then the block can be broadcast. /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. // Mutants skip here: this function is covered via integration tests, // which the mutation testing does not see. 
#[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] pub fn run_sign_v0( &mut self, block: &NakamotoBlock, @@ -306,7 +308,7 @@ impl SignCoordinator { &self.message_key, sortdb, burn_tip, - &stackerdbs, + stackerdbs, block_proposal_message, MinerSlotID::BlockProposal, self.is_mainnet, @@ -367,7 +369,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip) { + if Self::check_burn_tip_changed(sortdb, burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b..efc64bf8e7 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -62,79 +62,85 @@ /// [11] Notifies about new transaction attachment events /// [12] Signals VRF key registration /// -/// When the node is running, there are 4-5 active threads at once. They are: +/// When the node is running, there are 4-5 active threads at once. They are: /// -/// * **RunLoop Thread**: This is the main thread, whose code body lives in src/run_loop/neon.rs. -/// This thread is responsible for: -/// * Bootup -/// * Running the burnchain indexer -/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process +/// * **RunLoop Thread**: +/// This is the main thread, whose code body lives in `src/run_loop/neon.rs`. +/// This thread is responsible for: +/// * Bootup +/// * Running the burnchain indexer +/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process /// -/// * **Relayer Thread**: This is the thread that stores and relays blocks and microblocks. Both -/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to -/// ensure that neither one attempts to acquire a write-lock in the underlying databases. 
-/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks -/// blocks, and it directs the miner thread (if running) to stop when either it or the -/// ChainsCoordinator thread needs to acquire the write-lock. -/// This thread is responsible for: -/// * Receiving new blocks and microblocks from the P2P thread via a shared channel -/// * (Sychronously) requesting the CoordinatorThread to process newly-stored Stacks blocks and -/// microblocks -/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P -/// thread so it can answer queries about the unconfirmed microblock chain -/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast -/// * Registering the VRF public key for the miner -/// * Spawning the block and microblock miner threads, and stopping them if their continued -/// execution would inhibit block or microblock storage or processing. -/// * Submitting the burnchain operation to commit to a freshly-mined block +/// * **Relayer Thread**: +/// This is the thread that stores and relays blocks and microblocks. Both +/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to +/// ensure that neither one attempts to acquire a write-lock in the underlying databases. +/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks +/// blocks, and it directs the miner thread (if running) to stop when either it or the +/// ChainsCoordinator thread needs to acquire the write-lock. 
+/// This thread is responsible for: +/// * Receiving new blocks and microblocks from the P2P thread via a shared channel +/// * (Synchronously) requesting the CoordinatorThread to process newly-stored Stacks blocks +/// and microblocks +/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P +/// thread so it can answer queries about the unconfirmed microblock chain +/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast +/// * Registering the VRF public key for the miner +/// * Spawning the block and microblock miner threads, and stopping them if their continued +/// execution would inhibit block or microblock storage or processing. +/// * Submitting the burnchain operation to commit to a freshly-mined block /// -/// * **Miner thread**: This is the thread that actually produces new blocks and microblocks. It -/// is spawned only by the Relayer thread to carry out mining activity when the underlying -/// chainstate is not needed by either the Relayer or ChainsCoordinator threeads. -/// This thread does the following: -/// * Walk the mempool DB to build a new block or microblock -/// * Return the block or microblock to the Relayer thread +/// * **Miner Thread**: +/// This is the thread that actually produces new blocks and microblocks. It +/// is spawned only by the Relayer thread to carry out mining activity when the underlying +/// chainstate is not needed by either the Relayer or ChainsCoordinator threads. +/// This thread does the following: +/// * Walk the mempool DB to build a new block or microblock +/// * Return the block or microblock to the Relayer thread /// -/// * **P2P Thread**: This is the thread that communicates with the rest of the p2p network, and -/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock -/// contention with the Miner, Relayer, and ChainsCoordinator threads. 
In particular, it forwards -/// data it receives from the p2p thread to the Relayer thread for I/O-bound processing. At the -/// time of this writing, it still requires holding a write-lock to handle some RPC request, but -/// future work will remove this so that this thread's execution will not interfere with the -/// others. This is the only thread that does socket I/O. -/// This thread runs the PeerNetwork state machines, which include the following: -/// * Learning the node's public IP address -/// * Discovering neighbor nodes -/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread to -/// other neighbors -/// * Synchronizing block and microblock inventory state with other neighbors -/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and processing -/// * Downloading transaction attachments as their hashes are discovered during block processing -/// * Synchronizing the local mempool database with other neighbors -/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) -/// * Handling HTTP requests +/// * **P2P Thread**: +/// This is the thread that communicates with the rest of the P2P network, and +/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock +/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards +/// data it receives from the P2P network to the Relayer thread for I/O-bound processing. At the +/// time of this writing, it still requires holding a write-lock to handle some RPC requests, but +/// future work will remove this so that this thread's execution will not interfere with the +/// others. This is the only thread that does socket I/O. 
+/// This thread runs the PeerNetwork state machines, which include the following: +/// * Learning the node's public IP address +/// * Discovering neighbor nodes +/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread +/// to other neighbors +/// * Synchronizing block and microblock inventory state with other neighbors +/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and +/// processing +/// * Downloading transaction attachments as their hashes are discovered during block processing +/// * Synchronizing the local mempool database with other neighbors +/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) +/// * Handling HTTP requests /// -/// * **ChainsCoordinator Thread**: This thread process sortitions and Stacks blocks and -/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). It, -/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the -/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in -/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former -/// drives Stacks blocks processing, the latter sortitions). -/// This thread is responsible for: -/// * Responding to requests from other threads to process sortitions -/// * Responding to requests from other threads to process Stacks blocks and microblocks -/// * Processing PoX chain reorgs, should they ever happen -/// * Detecting attachment creation events, and informing the P2P thread of them so it can go -/// and download them +/// * **ChainsCoordinator Thread**: +/// This thread processes sortitions and Stacks blocks and +/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). 
It, +/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the +/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in +/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former +/// drives Stacks blocks processing, the latter sortitions). +/// This thread is responsible for: +/// * Responding to requests from other threads to process sortitions +/// * Responding to requests from other threads to process Stacks blocks and microblocks +/// * Processing PoX chain reorgs, should they ever happen +/// * Detecting attachment creation events, and informing the P2P thread of them so it can go +/// and download them /// /// In addition to the mempool and chainstate databases, these threads share access to a Globals -/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant -/// to store inter-thread shared singleton communication media all in one convenient struct. Each -/// thread has a handle to the struct's shared state handles. Global state includes: -/// * The global flag as to whether or not the miner thread can be running -/// * The global shutdown flag that, when set, causes all threads to terminate -/// * Sender channel endpoints that can be shared between threads -/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) +/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant +/// to store inter-thread shared singleton communication media all in one convenient struct. Each +/// thread has a handle to the struct's shared state handles. Global state includes: +/// * The global flag as to whether or not the miner thread can be running +/// * The global shutdown flag that, when set, causes all threads to terminate +/// * Sender channel endpoints that can be shared between threads +/// * Metrics about the node's behavior (e.g. 
number of blocks processed, etc.) /// /// This file may be refactored in the future into a full-fledged module. use std::cmp; @@ -230,6 +236,7 @@ pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB type MinedBlocks = HashMap; /// Result of running the miner thread. It could produce a Stacks block or a microblock. +#[allow(clippy::large_enum_variant)] pub(crate) enum MinerThreadResult { Block( AssembledAnchorBlock, @@ -646,8 +653,8 @@ impl MicroblockMinerThread { sortdb: Some(sortdb), mempool: Some(mempool), event_dispatcher: relayer_thread.event_dispatcher.clone(), - parent_consensus_hash: ch.clone(), - parent_block_hash: bhh.clone(), + parent_consensus_hash: ch, + parent_block_hash: bhh, miner_key, frequency, last_mined: 0, @@ -743,7 +750,7 @@ impl MicroblockMinerThread { let mint_result = { let ic = sortdb.index_handle_at_block( - &chainstate, + chainstate, &block_snapshot.get_canonical_stacks_block_id(), )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( @@ -810,7 +817,7 @@ impl MicroblockMinerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this microblock somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); } @@ -827,7 +834,7 @@ impl MicroblockMinerThread { r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash ); - file.write_all(&mblock_json.as_bytes()).unwrap_or_else(|_| { + file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { panic!("FATAL: failed to write microblock bits to '{:?}'", &path) }); info!( @@ -873,7 +880,7 @@ impl MicroblockMinerThread { // update unconfirmed state cost self.cost_so_far = new_cost; self.quantity += 1; - return Ok(mined_microblock); + Ok(mined_microblock) } /// Can this microblock miner mine off of this given tip? 
@@ -1086,6 +1093,7 @@ impl BlockMinerThread { } /// Constructs and returns a LeaderBlockCommitOp out of the provided params. + #[allow(clippy::too_many_arguments)] fn inner_generate_block_commit_op( &self, block_header_hash: BlockHeaderHash, @@ -1202,7 +1210,7 @@ impl BlockMinerThread { .expect("FATAL: could not query chain tips") }; - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1213,7 +1221,7 @@ impl BlockMinerThread { .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1269,7 +1277,7 @@ impl BlockMinerThread { pub(crate) fn sort_and_populate_candidates( mut candidates: Vec, ) -> Vec { - if candidates.len() == 0 { + if candidates.is_empty() { return candidates; } candidates.sort_by(|tip1, tip2| { @@ -1373,7 +1381,7 @@ impl BlockMinerThread { // identify leaf tips -- i.e. blocks with no children let parent_consensus_hashes: HashSet<_> = stacks_tips .iter() - .map(|x| x.parent_consensus_hash.clone()) + .map(|x| x.parent_consensus_hash) .collect(); let mut leaf_tips: Vec<_> = stacks_tips @@ -1381,7 +1389,7 @@ impl BlockMinerThread { .filter(|x| !parent_consensus_hashes.contains(&x.consensus_hash)) .collect(); - if leaf_tips.len() == 0 { + if leaf_tips.is_empty() { return None; } @@ -1502,7 +1510,7 @@ impl BlockMinerThread { } } - if scores.len() == 0 { + if scores.is_empty() { // revert to prior tie-breaking scheme return None; } @@ -1576,14 +1584,14 @@ impl BlockMinerThread { let chain_tip = ChainTip::genesis( &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), + burnchain_params.first_block_height, burnchain_params.first_block_timestamp.into(), ); ( Some(ParentStacksBlockInfo { stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, parent_block_burn_height: 0, 
parent_block_total_burn: 0, parent_winning_vtxindex: 0, @@ -1671,7 +1679,7 @@ impl BlockMinerThread { { if (prev_block.anchored_block.header.parent_microblock == BlockHeaderHash([0u8; 32]) - && stream.len() == 0) + && stream.is_empty()) || (prev_block.anchored_block.header.parent_microblock != BlockHeaderHash([0u8; 32]) && stream.len() @@ -1699,30 +1707,26 @@ impl BlockMinerThread { best_attempt = cmp::max(best_attempt, prev_block.attempt); } - } else { - if !force { - // no microblock stream to confirm, and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + } else if !force { + // no microblock stream to confirm, and the stacks tip hasn't changed + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); - return None; - } + return None; } - } else { - if self.burn_block.burn_header_hash == prev_block.burn_hash { - // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", + } else if self.burn_block.burn_header_hash == prev_block.burn_hash { + // only try and re-mine if there was no sortition since the last chain tip + info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block 
in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - // Since the chain tip has changed, we should try to mine a new block, even - // if it has less transactions than the previous block we mined, since that - // previous block would now be a reorg. - max_txs = 0; - } else { - info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); - } + best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. + max_txs = 0; + } else { + info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1838,7 +1842,7 @@ impl BlockMinerThread { }; if let Some((ref microblocks, ref poison_opt)) = µblock_info_opt { - if let Some(ref tail) = microblocks.last() { + if let Some(tail) = microblocks.last() { debug!( "Confirm microblock stream tailed at {} (seq {})", &tail.block_hash(), @@ -1848,8 +1852,7 @@ impl BlockMinerThread { // try and confirm as many microblocks as we can (but note that the stream itself may // be too long; we'll try again if that happens). 
- stacks_parent_header.microblock_tail = - microblocks.last().clone().map(|blk| blk.header.clone()); + stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { debug!("Detected poisoned microblock fork: {:?}", &poison_payload); @@ -1868,7 +1871,7 @@ impl BlockMinerThread { if let Err(e) = mem_pool.miner_submit( chain_state, sortdb, - &parent_consensus_hash, + parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, Some(&self.event_dispatcher), @@ -1920,6 +1923,7 @@ impl BlockMinerThread { } /// Obtain the target burn fee cap, when considering how well this miner is performing. + #[allow(clippy::too_many_arguments)] pub fn get_mining_spend_amount( config: &Config, keychain: &Keychain, @@ -1974,7 +1978,7 @@ impl BlockMinerThread { else { return config_file_burn_fee_cap; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return config_file_burn_fee_cap; } @@ -2009,16 +2013,15 @@ impl BlockMinerThread { let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, + recipients, ); let win_probs = if miner_config.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, - ); - win_probs + recipients, + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -2038,8 +2041,7 @@ impl BlockMinerThread { return config_file_burn_fee_cap; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; info!("Unconfirmed spend distribution: {:?}", &spend_dist); @@ -2102,6 +2104,7 @@ impl 
BlockMinerThread { /// Produce the block-commit for this anchored block, if we can. /// Returns the op on success /// Returns None if we fail somehow. + #[allow(clippy::too_many_arguments)] pub fn make_block_commit( &self, burn_db: &mut SortitionDB, @@ -2227,12 +2230,10 @@ impl BlockMinerThread { if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { if stacks_tip.anchored_header.height() + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= highest_unprocessed.height + > highest_unprocessed.height && highest_unprocessed_block_sn.block_height + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= sort_tip.block_height + > sort_tip.block_height { // we're close enough to the chain tip that it's a bad idea for us to mine // -- we'll likely create an orphan @@ -2243,7 +2244,7 @@ impl BlockMinerThread { } } // we can mine - return false; + false } /// Only used in mock signing to generate a peer info view @@ -2301,16 +2302,14 @@ impl BlockMinerThread { // Just wait a min amount of time for the mock signatures to come in while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + for chunk in chunks.into_iter().flatten() { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) { - if mock_signature.mock_proposal == *mock_proposal - && !mock_signatures.contains(&mock_signature) - { - mock_signatures.push(mock_signature); - } + mock_signatures.push(mock_signature); } } } @@ -2325,19 +2324,17 @@ impl BlockMinerThread { StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); 
let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { - for message in messages { - if let Some(message) = message { - if message.is_empty() { - continue; - } - let Ok(SignerMessage::MockBlock(mock_block)) = - SignerMessage::consensus_deserialize(&mut message.as_slice()) - else { - continue; - }; - if mock_block.mock_proposal.peer_info == *peer_info { - return true; - } + for message in messages.into_iter().flatten() { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockBlock(mock_block)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } @@ -2939,6 +2936,8 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? + #[allow(clippy::nonminimal_bool)] + #[allow(clippy::eq_op)] pub fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place (self.min_network_download_passes <= self.last_network_download_passes @@ -3022,7 +3021,7 @@ impl RelayerThread { net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + let canonical_tip = unconfirmed_state.confirmed_chain_tip; self.event_dispatcher.process_new_microblocks( canonical_tip, net_receipts.processed_unconfirmed_state, @@ -3094,7 +3093,7 @@ impl RelayerThread { if !Relayer::static_check_problematic_relayed_block( self.chainstate_ref().mainnet, epoch_id, - &anchored_block, + anchored_block, ASTRules::PrecheckSize, ) { // nope! 
@@ -3107,7 +3106,7 @@ impl RelayerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this block somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); } @@ -3123,7 +3122,7 @@ impl RelayerThread { r#"{{"block":"{}","consensus":"{}"}}"#, &block_bits_hex, &consensus_hash ); - file.write_all(&block_json.as_bytes()).unwrap_or_else(|_| { + file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { panic!("FATAL: failed to write block bits to '{:?}'", &path) }); info!( @@ -3154,8 +3153,8 @@ impl RelayerThread { chainstate.preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, - &parent_consensus_hash, + anchored_block, + parent_consensus_hash, 0, ) })?; @@ -3283,15 +3282,13 @@ impl RelayerThread { }; // advertize _and_ push blocks for now - let blocks_available = Relayer::load_blocks_available_data( - self.sortdb_ref(), - vec![consensus_hash.clone()], - ) - .expect("Failed to obtain block information for a block we mined."); + let blocks_available = + Relayer::load_blocks_available_data(self.sortdb_ref(), vec![consensus_hash]) + .expect("Failed to obtain block information for a block we mined."); let block_data = { let mut bd = HashMap::new(); - bd.insert(consensus_hash.clone(), mined_block.clone()); + bd.insert(consensus_hash, mined_block.clone()); bd }; @@ -3314,7 +3311,7 @@ impl RelayerThread { ); miner_tip = Self::pick_higher_tip(miner_tip, None); } else { - let ch = snapshot.consensus_hash.clone(); + let ch = snapshot.consensus_hash; let bh = mined_block.block_hash(); let height = mined_block.header.total_work.work; @@ -3391,7 +3388,7 @@ impl RelayerThread { let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { let mut tenures = vec![]; let last_sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &last_ch) + 
SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), last_ch) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown prior consensus hash"); @@ -3470,8 +3467,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB") .expect("FATAL: no snapshot for consensus hash"); - let old_last_mined_blocks = - mem::replace(&mut self.last_mined_blocks, MinedBlocks::new()); + let old_last_mined_blocks = mem::take(&mut self.last_mined_blocks); self.last_mined_blocks = Self::clear_stale_mined_blocks(this_burn_tip.block_height, old_last_mined_blocks); @@ -3553,7 +3549,7 @@ impl RelayerThread { /// cost since we won't be mining it anymore. fn setup_microblock_mining_state(&mut self, new_miner_tip: Option) { // update state - let my_miner_tip = std::mem::replace(&mut self.miner_tip, None); + let my_miner_tip = std::mem::take(&mut self.miner_tip); let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed @@ -3597,7 +3593,7 @@ impl RelayerThread { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo, - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -3676,14 +3672,14 @@ impl RelayerThread { /// Create the block miner thread state. 
/// Only proceeds if all of the following are true: - /// * the miner is not blocked - /// * last_burn_block corresponds to the canonical sortition DB's chain tip - /// * the time of issuance is sufficiently recent - /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) - /// * a miner thread is not running already + /// * The miner is not blocked + /// * `last_burn_block` corresponds to the canonical sortition DB's chain tip + /// * The time of issuance is sufficiently recent + /// * There are no unprocessed stacks blocks in the staging DB + /// * The relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * A miner thread is not running already fn create_block_miner( &mut self, registered_key: RegisteredKey, @@ -3724,11 +3720,11 @@ impl RelayerThread { } } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = last_burn_block.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( @@ -3797,6 +3793,7 @@ impl RelayerThread { /// Try to start up a block miner thread with this given VRF key and current burnchain tip. 
/// Returns true if the thread was started; false if it was not (for any reason) + #[allow(clippy::incompatible_msrv)] pub fn block_miner_thread_try_start( &mut self, registered_key: RegisteredKey, @@ -3898,11 +3895,13 @@ impl RelayerThread { true } - /// Start up a microblock miner thread if we can: - /// * no miner thread must be running already - /// * the miner must not be blocked - /// * we must have won the sortition on the stacks chain tip - /// Returns true if the thread was started; false if not. + /// Start up a microblock miner thread if possible: + /// * No miner thread must be running already + /// * The miner must not be blocked + /// * We must have won the sortition on the Stacks chain tip + /// + /// Returns `true` if the thread was started; `false` if not. + #[allow(clippy::incompatible_msrv)] pub fn microblock_miner_thread_try_start(&mut self) -> bool { let miner_tip = match self.miner_tip.as_ref() { Some(tip) => tip.clone(), @@ -4003,8 +4002,7 @@ impl RelayerThread { last_mined_block.burn_block_height, &self.last_mined_blocks, ) - .len() - == 0 + .is_empty() { // first time we've mined a block in this burnchain block debug!( @@ -4019,8 +4017,8 @@ impl RelayerThread { &last_mined_block.anchored_block.block_hash() ); - let bhh = last_mined_block.burn_hash.clone(); - let orig_bhh = last_mined_block.orig_burn_hash.clone(); + let bhh = last_mined_block.burn_hash; + let orig_bhh = last_mined_block.orig_burn_hash; let tenure_begin = last_mined_block.tenure_begin; self.last_mined_blocks.insert( @@ -4058,7 +4056,7 @@ impl RelayerThread { let num_mblocks = chainstate .unconfirmed_state .as_ref() - .map(|ref unconfirmed| unconfirmed.num_microblocks()) + .map(|unconfirmed| unconfirmed.num_microblocks()) .unwrap_or(0); (processed_unconfirmed_state, num_mblocks) @@ -4134,14 +4132,16 @@ impl RelayerThread { None } - /// Try to join with the miner thread. If we succeed, join the thread and return true. 
- /// Otherwise, if the thread is still running, return false; + /// Try to join with the miner thread. If successful, join the thread and return `true`. + /// Otherwise, if the thread is still running, return `false`. + /// /// Updates internal state gleaned from the miner, such as: - /// * new stacks block data - /// * new keychain state - /// * new metrics - /// * new unconfirmed state - /// Returns true if joined; false if not. + /// * New Stacks block data + /// * New keychain state + /// * New metrics + /// * New unconfirmed state + /// + /// Returns `true` if joined; `false` if not. pub fn miner_thread_try_join(&mut self) -> bool { if let Some(thread_handle) = self.miner_thread.take() { let new_thread_handle = self.inner_miner_thread_try_join(thread_handle); @@ -4193,7 +4193,7 @@ impl RelayerThread { RelayerDirective::RegisterKey(last_burn_block) => { let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { - saved_key_opt = Self::load_saved_vrf_key(&path); + saved_key_opt = Self::load_saved_vrf_key(path); } if let Some(saved_key) = saved_key_opt { self.globals.resume_leader_key(saved_key); @@ -4266,8 +4266,8 @@ impl ParentStacksBlockInfo { ) -> Result { let stacks_tip_header = StacksChainState::get_anchored_block_header_info( chain_state.db(), - &mine_tip_ch, - &mine_tip_bh, + mine_tip_ch, + mine_tip_bh, ) .unwrap() .ok_or_else(|| { @@ -4358,9 +4358,9 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, - parent_consensus_hash: mine_tip_ch.clone(), + parent_consensus_hash: *mine_tip_ch, parent_block_burn_height: parent_block_height, - parent_block_total_burn: parent_block_total_burn, + parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, }) @@ -4412,16 +4412,14 @@ impl PeerThread { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), config.burnchain.chain_id, 
&config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("Database failure opening mempool"); - - mempool + .expect("Database failure opening mempool") } /// Instantiate the p2p thread. @@ -4531,6 +4529,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub fn run_one_pass( &mut self, indexer: &B, @@ -4542,7 +4541,7 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = !self.results_with_data.is_empty(); let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( @@ -4566,11 +4565,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. let handler_args = RPCHandlerArgs { - exit_at_block_height: p2p_thread - .config - .burnchain - .process_exit_at_block_height - .clone(), + exit_at_block_height: p2p_thread.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -4726,32 +4721,32 @@ impl StacksNode { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), config.burnchain.chain_id, &config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("BUG: failed to instantiate mempool"); - - mempool + .expect("BUG: failed to instantiate mempool") } - /// Set up the Peer DB and update any soft state from the config file. 
This includes: - /// * blacklisted/whitelisted nodes - /// * node keys - /// * bootstrap nodes - /// Returns the instantiated PeerDB + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * Blacklisted/whitelisted nodes + /// * Node keys + /// * Bootstrap nodes + /// + /// Returns the instantiated `PeerDB`. + /// /// Panics on failure. fn setup_peer_db( config: &Config, burnchain: &Burnchain, stackerdb_contract_ids: &[QualifiedContractIdentifier], ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(config.node.data_url.to_string()).unwrap(); let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { + if !initial_neighbors.is_empty() { info!( "Will bootstrap from peers {}", VecDisplay(&initial_neighbors) @@ -4778,7 +4773,7 @@ impl StacksNode { config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), + config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -4798,12 +4793,12 @@ impl StacksNode { // allow all bootstrap nodes { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for initial_neighbor in initial_neighbors.iter() { // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::update_peer(&tx, initial_neighbor).unwrap(); PeerDB::set_allow_peer( - &mut tx, + &tx, initial_neighbor.addr.network_id, &initial_neighbor.addr.addrbytes, initial_neighbor.addr.port, @@ -4820,10 +4815,10 @@ impl StacksNode { // deny all config-denied peers { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -4836,9 +4831,9 @@ impl 
StacksNode { // update services to indicate we can support mempool sync and stackerdb { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); PeerDB::set_local_services( - &mut tx, + &tx, (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16) | (ServiceFlags::STACKERDB as u16), @@ -4867,7 +4862,7 @@ impl StacksNode { .expect("Error while loading stacks epochs"); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() @@ -4914,7 +4909,7 @@ impl StacksNode { _ => panic!("Unable to retrieve local peer"), }; - let p2p_net = PeerNetwork::new( + PeerNetwork::new( peerdb, atlasdb, stackerdbs, @@ -4925,9 +4920,7 @@ impl StacksNode { config.connection_options.clone(), stackerdb_machines, epochs, - ); - - p2p_net + ) } /// Main loop of the relayer. 
@@ -5223,9 +5216,9 @@ impl StacksNode { .globals .relay_send .send(RelayerDirective::ProcessTenure( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) .is_ok(); } @@ -5270,13 +5263,11 @@ impl StacksNode { block_height, op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{} including block_commit_op - {} ({})", + block_height, op.apparent_sender, &op.block_header_hash + ); } } @@ -5313,7 +5304,7 @@ impl StacksNode { return ret; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { warn!("Failed to create {}: {:?}", &path, &e); @@ -5321,13 +5312,13 @@ impl StacksNode { } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { + if let Err(e) = f.write_all(key_json.as_bytes()) { warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); return ret; } info!("Saved activated VRF key to {}", &path); - return ret; + ret } /// Join all inner threads diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 1895912ba5..8aebd4814a 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -151,6 +151,7 @@ pub fn get_names(use_test_chainstate_data: bool) -> Box bool { impl Node { /// Instantiate and initialize a new node, given a config - pub fn new(config: Config, boot_block_exec: Box ()>) -> Self { + pub fn new(config: Config, boot_block_exec: Box) -> Self { let use_test_genesis_data = if config.burnchain.mode == "mocknet" { use_test_genesis_chainstate(&config) } 
else { @@ -407,14 +408,14 @@ impl Node { Config::assert_valid_epoch_settings(&burnchain, &epochs); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() }; // create a new peerdb - let data_url = UrlString::try_from(format!("{}", self.config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(self.config.node.data_url.to_string()).unwrap(); let initial_neighbors = self.config.node.bootstrap_node.clone(); @@ -452,7 +453,7 @@ impl Node { self.config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - self.config.connection_options.private_key_lifetime.clone(), + self.config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -464,10 +465,10 @@ impl Node { println!("DENY NEIGHBORS {:?}", &self.config.node.deny_nodes); { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in self.config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -488,7 +489,7 @@ impl Node { }; let event_dispatcher = self.event_dispatcher.clone(); - let exit_at_block_height = self.config.burnchain.process_exit_at_block_height.clone(); + let exit_at_block_height = self.config.burnchain.process_exit_at_block_height; let p2p_net = PeerNetwork::new( peerdb, @@ -577,9 +578,9 @@ impl Node { // Registered key has been mined new_key = Some(RegisteredKey { vrf_public_key: op.public_key.clone(), - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, - target_block_height: (op.block_height as u64) - 1, + block_height: op.block_height, + op_vtxindex: op.vtxindex, + target_block_height: op.block_height - 1, memo: op.memo.clone(), }); } @@ 
-649,7 +650,7 @@ impl Node { burnchain.pox_constants, ) .expect("Error while opening sortition db"); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query canonical burn chain tip"); // Generates a proof out of the sortition hash provided in the params. @@ -734,7 +735,7 @@ impl Node { anchored_block_from_ongoing_tenure.header.block_hash(), burn_fee, ®istered_key, - &burnchain_tip, + burnchain_tip, VRFSeed::from_proof(&vrf_proof), ); @@ -802,7 +803,7 @@ impl Node { .preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, + anchored_block, &parent_consensus_hash, 0, ) @@ -813,7 +814,7 @@ impl Node { let res = self .chain_state .preprocess_streamed_microblock( - &consensus_hash, + consensus_hash, &anchored_block.block_hash(), microblock, ) @@ -849,31 +850,28 @@ impl Node { match process_blocks_at_tip { Err(e) => panic!("Error while processing block - {:?}", e), Ok(ref mut blocks) => { - if blocks.len() == 0 { + if blocks.is_empty() { break; } else { for block in blocks.iter() { - match block { - (Some(epoch_receipt), _) => { - let attachments_instances = - self.get_attachment_instances(epoch_receipt, &atlas_config); - if !attachments_instances.is_empty() { - for new_attachment in attachments_instances.into_iter() { - if let Err(e) = - atlas_db.queue_attachment_instance(&new_attachment) - { - warn!( - "Atlas: Error writing attachment instance to DB"; - "err" => ?e, - "index_block_hash" => %new_attachment.index_block_hash, - "contract_id" => %new_attachment.contract_id, - "attachment_index" => %new_attachment.attachment_index, - ); - } + if let (Some(epoch_receipt), _) = block { + let attachments_instances = + self.get_attachment_instances(epoch_receipt, &atlas_config); + if !attachments_instances.is_empty() { + for new_attachment in attachments_instances.into_iter() { + if let Err(e) = + atlas_db.queue_attachment_instance(&new_attachment) + { 
+ warn!( + "Atlas: Error writing attachment instance to DB"; + "err" => ?e, + "index_block_hash" => %new_attachment.index_block_hash, + "contract_id" => %new_attachment.contract_id, + "attachment_index" => %new_attachment.attachment_index, + ); } } } - _ => {} } } @@ -990,7 +988,7 @@ impl Node { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: vec![], - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 1, txid, block_height: 0, diff --git a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs index 4680098d2b..0109077a5f 100644 --- a/testnet/stacks-node/src/operations.rs +++ b/testnet/stacks-node/src/operations.rs @@ -31,8 +31,7 @@ impl BurnchainOpSigner { } pub fn get_public_key(&mut self) -> Secp256k1PublicKey { - let public_key = Secp256k1PublicKey::from_private(&self.secret_key); - public_key + Secp256k1PublicKey::from_private(&self.secret_key) } pub fn sign_message(&mut self, hash: &[u8]) -> Option { diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 85ace37fa4..2333167334 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -137,8 +137,8 @@ impl BootRunLoop { /// node depending on the current burnchain height. 
pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { match self.active_loop { - InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), - InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + InnerLoops::Epoch2(_) => self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => self.start_from_naka(burnchain_opt, mine_start), } } @@ -227,7 +227,7 @@ impl BootRunLoop { // if loop exited, do the transition info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop"); neon_term_switch.store(false, Ordering::SeqCst); - return true + true }) } diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 2922ce584a..c61581553c 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -21,10 +21,7 @@ impl RunLoop { } /// Sets up a runloop and node, given a config. - pub fn new_with_boot_exec( - config: Config, - boot_exec: Box ()>, - ) -> Self { + pub fn new_with_boot_exec(config: Config, boot_exec: Box) -> Self { // Build node based on config let node = Node::new(config.clone(), boot_exec); @@ -174,17 +171,14 @@ impl RunLoop { None => None, }; - match artifacts_from_tenure { - Some(ref artifacts) => { - // Have each node receive artifacts from the current tenure - self.node.commit_artifacts( - &artifacts.anchored_block, - &artifacts.parent_block, - &mut burnchain, - artifacts.burn_fee, - ); - } - None => {} + if let Some(artifacts) = &artifacts_from_tenure { + // Have each node receive artifacts from the current tenure + self.node.commit_artifacts( + &artifacts.anchored_block, + &artifacts.parent_block, + &mut burnchain, + artifacts.burn_fee, + ); } let (new_burnchain_tip, _) = burnchain.sync(None)?; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index b2b9aa3f75..ce4c06a16c 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ 
b/testnet/stacks-node/src/run_loop/mod.rs @@ -37,6 +37,7 @@ macro_rules! info_green { }) } +#[allow(clippy::type_complexity)] pub struct RunLoopCallbacks { on_burn_chain_initialized: Option)>, on_new_burn_chain_state: Option, @@ -45,6 +46,12 @@ pub struct RunLoopCallbacks { on_new_tenure: Option, } +impl Default for RunLoopCallbacks { + fn default() -> Self { + Self::new() + } +} + impl RunLoopCallbacks { pub fn new() -> RunLoopCallbacks { RunLoopCallbacks { @@ -167,7 +174,7 @@ pub fn announce_boot_receipts( event_dispatcher: &mut EventDispatcher, chainstate: &StacksChainState, pox_constants: &PoxConstants, - boot_receipts: &Vec, + boot_receipts: &[StacksTransactionReceipt], ) { let block_header_0 = StacksChainState::get_genesis_header_info(chainstate.db()) .expect("FATAL: genesis block header not stored"); @@ -189,7 +196,7 @@ pub fn announce_boot_receipts( Txid([0x00; 32]), &[], None, - block_header_0.burn_header_hash.clone(), + block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, &ExecutionCost::zero(), diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 3d94b1c351..de836568d2 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -100,7 +100,7 @@ impl RunLoop { config, globals: None, coordinator_channels: Some(channels), - counters: counters.unwrap_or_else(|| Counters::new()), + counters: counters.unwrap_or_default(), should_keep_running, event_dispatcher, pox_watchdog: None, @@ -167,9 +167,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {:?}", e); } let mut btc_addrs = vec![( 
StacksEpochId::Epoch2_05, @@ -285,7 +284,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -296,7 +294,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -325,13 +323,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -382,7 +379,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -477,7 +474,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. 
diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a18a61988b..7be8939d9e 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -369,9 +369,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {:?}", e); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -490,14 +489,11 @@ impl RunLoop { burnchain_controller .start(Some(target_burnchain_block_height)) .map_err(|e| { - match e { - Error::CoordinatorClosed => { - if !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); - return burnchain_error::ShutdownInitiated; - } - } - _ => {} + if matches!(e, Error::CoordinatorClosed) + && !should_keep_running.load(Ordering::SeqCst) + { + info!("Shutdown initiated during burnchain initialization: {}", e); + return burnchain_error::ShutdownInitiated; } error!("Burnchain controller stopped: {}", e); panic!(); @@ -581,7 +577,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -592,7 +587,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( 
moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -621,13 +616,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -685,7 +679,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -737,7 +731,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -885,7 +879,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -899,11 +893,11 @@ impl RunLoop { }; let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, - &chain_state_db, + chain_state_db, &sn.sortition_id, ) { Ok(am) => am, @@ -1018,15 +1012,13 @@ impl RunLoop { ) .unwrap(); - let liveness_thread_handle = thread::Builder::new() + thread::Builder::new() .name(format!("chain-liveness-{}", config.node.rpc_bind)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { Self::drive_chain_liveness(globals, config, burnchain, sortdb, chain_state_db) }) - .expect("FATAL: failed to spawn chain liveness thread"); - - liveness_thread_handle + .expect("FATAL: failed to spawn chain liveness thread") } /// Starts the node 
runloop. @@ -1109,7 +1101,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -1137,7 +1129,7 @@ impl RunLoop { .tx_begin() .expect("FATAL: failed to begin burnchain DB tx"); for (reward_cycle, affirmation) in self.config.burnchain.affirmation_overrides.iter() { - tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).expect(&format!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); + tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).unwrap_or_else(|_| panic!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); } tx.commit() .expect("FATAL: failed to commit burnchain DB tx"); diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs index f63b17a6ab..2f96bbfe66 100644 --- a/testnet/stacks-node/src/stacks_events.rs +++ b/testnet/stacks-node/src/stacks_events.rs @@ -92,7 +92,7 @@ fn handle_connection(mut stream: TcpStream) { contents ); - stream.write(response.as_bytes()).unwrap(); + let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); stream.flush().unwrap(); } } diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index ff68126a83..d4c05ec7fe 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -69,7 +69,7 @@ impl PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { @@ -95,7 +95,7 @@ impl 
PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } pub fn should_keep_running(&self) -> bool { @@ -192,7 +192,7 @@ impl PoxSyncWatchdog { new_processed_blocks: VecDeque::new(), last_attachable_query: 0, last_processed_query: 0, - max_samples: max_samples, + max_samples, max_staging: 10, watch_start_ts: 0, last_block_processed_ts: 0, @@ -200,7 +200,7 @@ impl PoxSyncWatchdog { estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, steady_state_resync_ts: 0, - chainstate: chainstate, + chainstate, relayer_comms: watchdog_comms, }) } @@ -213,7 +213,7 @@ impl PoxSyncWatchdog { fn count_attachable_stacks_blocks(&mut self) -> Result { // number of staging blocks that have arrived since the last sortition let cnt = StacksChainState::count_attachable_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_attachable_query, ) @@ -229,7 +229,7 @@ impl PoxSyncWatchdog { fn count_processed_stacks_blocks(&mut self) -> Result { // number of staging blocks that have arrived since the last sortition let cnt = StacksChainState::count_processed_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_processed_query, ) @@ -281,7 +281,7 @@ impl PoxSyncWatchdog { /// Is a derivative approximately flat, with a maximum absolute deviation from 0? /// Return whether or not the sample is mostly flat, and how many points were over the given /// error bar in either direction. 
- fn is_mostly_flat(deriv: &Vec, error: i64) -> (bool, usize) { + fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { let mut total_deviates = 0; let mut ret = true; for d in deriv.iter() { @@ -294,7 +294,7 @@ impl PoxSyncWatchdog { } /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &Vec) -> f64 { + fn hilo_filter_avg(samples: &[i64]) -> f64 { // take average with low and high pass let mut min = i64::MAX; let mut max = i64::MIN; @@ -358,7 +358,7 @@ impl PoxSyncWatchdog { } let block_wait_times = - StacksChainState::measure_block_wait_time(&chainstate.db(), start_height, end_height) + StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) .expect("BUG: failed to query chainstate block-processing times"); PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) @@ -386,7 +386,7 @@ impl PoxSyncWatchdog { } let block_download_times = StacksChainState::measure_block_download_time( - &chainstate.db(), + chainstate.db(), start_height, end_height, ) diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 5dd67cddab..7322133889 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -41,7 +41,8 @@ pub struct Tenure { parent_block_total_burn: u64, } -impl<'a> Tenure { +impl Tenure { + #[allow(clippy::too_many_arguments)] pub fn new( parent_block: ChainTip, coinbase_tx: StacksTransaction, @@ -82,7 +83,7 @@ impl<'a> Tenure { elapsed = Instant::now().duration_since(self.burnchain_tip.received_at); } - let (mut chain_state, _) = StacksChainState::open( + let (chain_state, _) = StacksChainState::open( self.config.is_mainnet(), self.config.burnchain.chain_id, &self.config.get_chainstate_path_str(), @@ -91,13 +92,13 @@ impl<'a> Tenure { .unwrap(); let (anchored_block, _, _) = StacksBlockBuilder::build_anchored_block( - &mut chain_state, + &chain_state, burn_dbconn, &mut self.mem_pool, 
&self.parent_block.metadata, self.parent_block_total_burn, self.vrf_proof.clone(), - self.microblock_pubkeyhash.clone(), + self.microblock_pubkeyhash, &self.coinbase_tx, BlockBuilderSettings::limited(), None, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 90b1310183..702f6d5953 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -43,21 +43,18 @@ impl BitcoinCoreController { fn add_rpc_cli_args(&self, command: &mut Command) { command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - match ( + if let (Some(username), Some(password)) = ( &self.config.burnchain.username, &self.config.burnchain.password, ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } } pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { - std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); + std::fs::create_dir_all(self.config.get_burnchain_path_str()).unwrap(); let mut command = Command::new("bitcoind"); command @@ -104,7 +101,7 @@ impl BitcoinCoreController { } pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { - if let Some(_) = self.bitcoind_process.take() { + if self.bitcoind_process.take().is_some() { let payload = BitcoinRPCRequest { method: "stop".to_string(), params: vec![], @@ -217,11 +214,11 @@ fn bitcoind_integration(segwit_flag: bool) { .callbacks .on_new_burn_chain_state(|round, burnchain_tip, chain_tip| { let block = &burnchain_tip.block_snapshot; - let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round as u64 + 1); + let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round + 1); assert_eq!(block.total_burn, expected_total_burn); - assert_eq!(block.sortition, true); - 
assert_eq!(block.num_sortitions, round as u64 + 1); - assert_eq!(block.block_height, round as u64 + 2003); + assert!(block.sortition); + assert_eq!(block.num_sortitions, round + 1); + assert_eq!(block.block_height, round + 2003); let leader_key = "f888e0cab5c16de8edf72b544a189ece5c0b95cd9178606c970789ac71d17bb4"; match round { @@ -246,7 +243,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert!(op.parent_vtxindex == 0); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } } @@ -270,7 +267,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2003); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -299,7 +296,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2004); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -328,7 +325,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2005); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -357,7 +354,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2006); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -386,7 +383,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2007); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -464,7 +461,6 @@ fn bitcoind_integration(segwit_flag: bool) { }, _ => {} }; - return }); // Use block's hook for asserting expectations diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 076a5f61f3..6fe0018ced 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ 
b/testnet/stacks-node/src/tests/epoch_205.rs @@ -19,9 +19,7 @@ use stacks::core::{ PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, -}; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; @@ -50,7 +48,7 @@ fn test_exact_block_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let epoch_205_transition_height = 210; let transactions_to_broadcast = 25; @@ -256,10 +254,8 @@ fn test_exact_block_costs() { if dbget_txs.len() >= 2 { processed_txs_before_205 = true; } - } else { - if dbget_txs.len() >= 2 { - processed_txs_after_205 = true; - } + } else if dbget_txs.len() >= 2 { + processed_txs_after_205 = true; } assert_eq!(mined_anchor_cost, anchor_cost as u64); @@ -287,7 +283,7 @@ fn test_dynamic_db_method_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let contract_name = "test-contract"; let epoch_205_transition_height = 210; @@ -455,8 +451,7 @@ fn test_dynamic_db_method_costs() { .as_i64() .unwrap(); eprintln!( - "Burn height = {}, runtime_cost = {}, function_name = {}", - burn_height, runtime_cost, function_name + "Burn height = {burn_height}, runtime_cost = {runtime_cost}, function_name = {function_name}" ); if function_name == "db-get1" { @@ -569,21 +564,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, 
&tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 { @@ -831,7 +825,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::SmartContract(contract, ..) => { - contract.name == ContractName::try_from("increment-contract").unwrap() + contract.name == ContractName::from("increment-contract") } _ => false, }, @@ -847,7 +841,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -863,7 +857,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -882,7 +876,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -897,7 +891,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -916,10 +910,7 @@ fn bigger_microblock_streams_in_2_05() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + 
let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -993,7 +984,7 @@ fn bigger_microblock_streams_in_2_05() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -1035,9 +1026,8 @@ fn bigger_microblock_streams_in_2_05() { ) ) (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) + (crash-me \"large-contract-{ix}\")) + " ) ) }) @@ -1176,9 +1166,9 @@ fn bigger_microblock_streams_in_2_05() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("costs-2").is_some() { + if tsc.name.to_string().contains("costs-2") { in_205 = true; - } else if tsc.name.to_string().find("large").is_some() { + } else if tsc.name.to_string().contains("large") { num_big_microblock_txs += 1; if in_205 { total_big_txs_per_microblock_205 += 1; @@ -1209,7 +1199,7 @@ fn bigger_microblock_streams_in_2_05() { max_big_txs_per_microblock_20 = num_big_microblock_txs; } - eprintln!("Epoch size: {:?}", &total_execution_cost); + eprintln!("Epoch size: {total_execution_cost:?}"); if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { epoch_20_stream_cost = total_execution_cost; @@ -1232,21 +1222,13 @@ fn bigger_microblock_streams_in_2_05() { } eprintln!( - "max_big_txs_per_microblock_20: {}, total_big_txs_per_microblock_20: {}", - max_big_txs_per_microblock_20, total_big_txs_per_microblock_20 - ); - eprintln!( - "max_big_txs_per_microblock_205: {}, total_big_txs_per_microblock_205: {}", - max_big_txs_per_microblock_205, total_big_txs_per_microblock_205 - ); - eprintln!( 
- "confirmed stream execution in 2.0: {:?}", - &epoch_20_stream_cost + "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" ); eprintln!( - "confirmed stream execution in 2.05: {:?}", - &epoch_205_stream_cost + "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" ); + eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); + eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); // stuff happened assert!(epoch_20_stream_cost.runtime > 0); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 8f6c466318..ebe14bae16 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1,6 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::{env, thread}; +use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use stacks::burnchains::bitcoin::address::{ @@ -46,7 +47,7 @@ use crate::tests::neon_integrations::*; use crate::tests::*; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; -const MINER_BURN_PUBLIC_KEY: &'static str = +const MINER_BURN_PUBLIC_KEY: &str = "03dc62fe0b8964d01fc9ca9a5eec0e22e557a12cc656919e648f04e0b26fea5faa"; fn advance_to_2_1( @@ -210,7 +211,7 @@ fn advance_to_2_1( ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -264,13 +265,13 @@ fn advance_to_2_1( assert_eq!(account.nonce, 9); eprintln!("Begin Stacks 2.1"); - return ( + ( conf, btcd_controller, btc_regtest_controller, blocks_processed, channel, - ); + ) } #[test] @@ -285,7 +286,7 @@ fn transition_adds_burn_block_height() { let spender_sk = StacksPrivateKey::new(); let spender_addr = 
PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1( @@ -411,9 +412,8 @@ fn transition_adds_burn_block_height() { // strip leading `0x` eprintln!("{:#?}", &cev); let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -544,7 +544,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -554,7 +554,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let epoch_2_05 = 210; let epoch_2_1 = 215; @@ -655,7 +655,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op for a transfer-stx op that will get mined before the 2.1 epoch let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -687,8 +687,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + 
sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -698,7 +698,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -728,7 +728,7 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -812,7 +812,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -840,8 +840,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -851,7 +851,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -885,7 +885,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -914,8 +914,8 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off our transfer op. 
let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -925,7 +925,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -952,7 +952,7 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off another transfer op that will fall outside the window let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -980,8 +980,8 @@ fn transition_fixes_bitcoin_rigidity() { }; let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 123, memo: vec![], // to be filled in @@ -991,7 +991,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -1070,11 +1070,7 @@ fn transition_adds_get_pox_addr_recipients() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1(initial_balances, None, Some(pox_constants.clone()), false); @@ -1094,7 +1090,7 @@ fn 
transition_adds_get_pox_addr_recipients() { .iter() .enumerate() { - let spender_sk = spender_sks[i].clone(); + let spender_sk = spender_sks[i]; let pox_addr_tuple = execute( &format!( "{{ hashbytes: 0x{}, version: 0x{:02x} }}", @@ -1126,9 +1122,8 @@ fn transition_adds_get_pox_addr_recipients() { } // stack some STX to segwit addressses - for i in 4..7 { - let spender_sk = spender_sks[i].clone(); - let pubk = Secp256k1PublicKey::from_private(&spender_sk); + for (i, spender_sk) in spender_sks.iter().enumerate().take(7).skip(4) { + let pubk = Secp256k1PublicKey::from_private(spender_sk); let version = i as u8; let bytes = match i { 4 => { @@ -1147,7 +1142,7 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap() .unwrap(); let tx = make_contract_call( - &spender_sk, + spender_sk, 0, 300, conf.burnchain.chain_id, @@ -1183,7 +1178,7 @@ fn transition_adds_get_pox_addr_recipients() { ) "; - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sks[0])); + let spender_addr_c32 = to_addr(&spender_sks[0]); let contract_tx = make_contract_publish( &spender_sks[0], 1, @@ -1202,9 +1197,7 @@ fn transition_adds_get_pox_addr_recipients() { // mine through two reward cycles // now let's mine until the next reward cycle starts ... 
- while sort_height - < (stack_sort_height as u64) + (((2 * pox_constants.reward_cycle_length) + 1) as u64) - { + while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); eprintln!("Sort height: {}", sort_height); @@ -1244,13 +1237,12 @@ fn transition_adds_get_pox_addr_recipients() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if parsed.txid() == cc_txid { // check events for this block - for (_i, event) in events.iter().enumerate() { + for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -1313,9 +1305,10 @@ fn transition_adds_get_pox_addr_recipients() { for pox_addr_value in pox_addr_tuples.into_iter() { let pox_addr = - PoxAddress::try_from_pox_tuple(false, &pox_addr_value).expect( - &format!("FATAL: invalid PoX tuple {:?}", &pox_addr_value), - ); + PoxAddress::try_from_pox_tuple(false, &pox_addr_value) + .unwrap_or_else(|| { + panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") + }); eprintln!("at {}: {:?}", burn_block_height, &pox_addr); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); @@ -1388,7 +1381,7 @@ fn transition_adds_mining_from_segwit() { let utxos = btc_regtest_controller .get_all_utxos(&Secp256k1PublicKey::from_hex(MINER_BURN_PUBLIC_KEY).unwrap()); - assert!(utxos.len() > 0); + assert!(!utxos.is_empty()); // all UTXOs should be segwit for utxo in utxos.iter() { @@ -1428,7 +1421,7 @@ fn transition_adds_mining_from_segwit() { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id).unwrap(); assert_eq!(commits.len(), 1); - let txid = 
commits[0].txid.clone(); + let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); eprintln!("tx = {:?}", &tx); @@ -1462,11 +1455,7 @@ fn transition_removes_pox_sunset() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -1518,8 +1507,8 @@ fn transition_removes_pox_sunset() { 4 * prepare_phase_len / 5, 5, 15, - (sunset_start_rc * reward_cycle_len - 1).into(), - (sunset_end_rc * reward_cycle_len).into(), + sunset_start_rc * reward_cycle_len - 1, + sunset_end_rc * reward_cycle_len, (epoch_21 as u32) + 1, u32::MAX, u32::MAX, @@ -1573,11 +1562,8 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); let tx = make_contract_call( @@ -1617,11 +1603,8 @@ fn transition_removes_pox_sunset() { // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!("pox_info in pox-1 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); // advance to 2.1 while sort_height <= epoch_21 + 1 { @@ -1636,11 +1619,8 @@ fn transition_removes_pox_sunset() { // though the v1 block height has passed, the pox-2 contract won't be managing reward sets // 
until the next reward cycle eprintln!("pox_info in pox-2 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); // re-stack let tx = make_contract_call( @@ -1677,7 +1657,7 @@ fn transition_removes_pox_sunset() { ); let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // get pox back online while sort_height <= epoch_21 + reward_cycle_len { @@ -1688,13 +1668,10 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!("pox_info = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); let burn_blocks = test_observer::get_burn_blocks(); let mut pox_out_opt = None; @@ -1719,9 +1696,9 @@ fn transition_removes_pox_sunset() { if (i as u64) < (sunset_start_rc * reward_cycle_len) { // before sunset - if recipients.len() >= 1 { + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { - pox_out_opt = if let Some(pox_out) = pox_out_opt.clone() { + pox_out_opt = if let Some(pox_out) = pox_out_opt { Some(std::cmp::max(amt, pox_out)) } else { Some(amt) @@ -1730,16 +1707,16 @@ fn transition_removes_pox_sunset() { } } else if (i as u64) >= (sunset_start_rc * reward_cycle_len) && (i as u64) + 1 < epoch_21 { // some sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in 
recipients.into_iter() { assert!(amt < pox_out); } } } else if (i as u64) + 1 >= epoch_21 { // no sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { // NOTE: odd number of reward cycles if !burnchain_config.is_in_prepare_phase((i + 2) as u64) { @@ -1875,7 +1852,7 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) @@ -1987,7 +1964,7 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: let mut stacks_tip_bhh = None; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.stacks_tip_height < max_stacks_tip { @@ -2057,15 +2034,9 @@ fn test_pox_reorgs_three_flaps() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2112,7 +2083,7 @@ fn test_pox_reorgs_three_flaps() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = 
conf_template.node.require_affirmed_anchor_blocks; @@ -2134,12 +2105,11 @@ fn test_pox_reorgs_three_flaps() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -2151,8 +2121,8 @@ fn test_pox_reorgs_three_flaps() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2187,10 +2157,10 @@ fn test_pox_reorgs_three_flaps() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2215,8 +2185,8 @@ fn test_pox_reorgs_three_flaps() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2238,10 +2208,10 @@ fn test_pox_reorgs_three_flaps() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -2250,11 +2220,7 @@ fn test_pox_reorgs_three_flaps() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2264,19 +2230,14 @@ fn test_pox_reorgs_three_flaps() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2311,11 +2272,9 @@ fn test_pox_reorgs_three_flaps() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2325,7 +2284,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: 
{:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -2344,7 +2303,7 @@ fn test_pox_reorgs_three_flaps() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -2353,7 +2312,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); @@ -2374,7 +2333,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2386,7 +2345,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2399,7 +2358,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2411,7 +2370,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. 
@@ -2428,7 +2387,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2440,7 +2399,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history continues to overtake miner 0's. @@ -2457,7 +2416,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2469,7 +2428,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. @@ -2484,7 +2443,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2496,7 +2455,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0's affirmation map now becomes the heaviest. 
@@ -2511,7 +2470,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2524,7 +2483,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. @@ -2538,7 +2497,7 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2554,7 +2513,7 @@ fn test_pox_reorgs_three_flaps() { // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -2599,15 +2558,9 @@ fn test_pox_reorg_one_flap() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2654,7 +2607,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = 
conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2674,12 +2627,11 @@ fn test_pox_reorg_one_flap() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -2691,8 +2643,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2727,10 +2679,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2755,8 +2707,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2778,10 +2730,10 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); if tip_info.stacks_tip_height > 0 { @@ -2790,11 +2742,7 @@ fn test_pox_reorg_one_flap() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2804,19 +2752,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2851,11 +2794,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2865,7 +2806,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); 
if tip_info.burn_block_height == 220 { at_220 = true; @@ -2884,7 +2825,7 @@ fn test_pox_reorg_one_flap() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -2893,7 +2834,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2912,7 +2853,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2924,7 +2865,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -2937,7 +2878,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2950,7 +2891,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. 
@@ -2966,7 +2907,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -2982,7 +2923,7 @@ fn test_pox_reorg_one_flap() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3025,15 +2966,9 @@ fn test_pox_reorg_flap_duel() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3080,7 +3015,7 @@ fn test_pox_reorg_flap_duel() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3102,12 +3037,12 @@ fn test_pox_reorg_flap_duel() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = 
confs[0].node.p2p_bind.clone(); - confs[i].node.set_bootstrap_nodes( + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3119,8 +3054,8 @@ fn test_pox_reorg_flap_duel() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3155,10 +3090,10 @@ fn test_pox_reorg_flap_duel() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3183,8 +3118,8 @@ fn test_pox_reorg_flap_duel() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3206,10 +3141,10 @@ fn test_pox_reorg_flap_duel() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -3218,11 +3153,7 @@ fn test_pox_reorg_flap_duel() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3232,19 +3163,14 @@ fn test_pox_reorg_flap_duel() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3279,11 +3205,9 @@ fn test_pox_reorg_flap_duel() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3293,7 +3217,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, 
&tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -3312,7 +3236,7 @@ fn test_pox_reorg_flap_duel() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -3321,7 +3245,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); @@ -3349,7 +3273,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3362,7 +3286,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3375,7 +3299,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3387,7 +3311,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. 
@@ -3404,7 +3328,7 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3423,7 +3347,7 @@ fn test_pox_reorg_flap_duel() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3465,15 +3389,9 @@ fn test_pox_reorg_flap_reward_cycles() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3520,7 +3438,7 @@ fn test_pox_reorg_flap_reward_cycles() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3540,12 +3458,11 @@ fn test_pox_reorg_flap_reward_cycles() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = 
confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3557,8 +3474,8 @@ fn test_pox_reorg_flap_reward_cycles() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in confs.iter() { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3593,10 +3510,10 @@ fn test_pox_reorg_flap_reward_cycles() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3621,8 +3538,8 @@ fn test_pox_reorg_flap_reward_cycles() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3644,10 +3561,10 @@ fn test_pox_reorg_flap_reward_cycles() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -3656,11 +3573,7 @@ fn test_pox_reorg_flap_reward_cycles() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3670,19 +3583,14 @@ fn test_pox_reorg_flap_reward_cycles() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3717,11 +3625,9 @@ fn test_pox_reorg_flap_reward_cycles() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3731,7 +3637,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); 
info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -3750,7 +3656,7 @@ fn test_pox_reorg_flap_reward_cycles() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -3759,7 +3665,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3785,7 +3691,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3794,7 +3700,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -3808,7 +3714,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } @@ -3816,7 +3722,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: 
{:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -3833,7 +3739,7 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -3852,7 +3758,7 @@ fn test_pox_reorg_flap_reward_cycles() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -3897,15 +3803,9 @@ fn test_pox_missing_five_anchor_blocks() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3952,7 +3852,7 @@ fn test_pox_missing_five_anchor_blocks() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3972,12 +3872,11 @@ fn test_pox_missing_five_anchor_blocks() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - 
confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -3989,8 +3888,8 @@ fn test_pox_missing_five_anchor_blocks() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4025,10 +3924,10 @@ fn test_pox_missing_five_anchor_blocks() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4053,8 +3952,8 @@ fn test_pox_missing_five_anchor_blocks() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4076,10 +3975,10 @@ fn test_pox_missing_five_anchor_blocks() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -4088,11 +3987,7 @@ fn test_pox_missing_five_anchor_blocks() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4102,19 +3997,14 @@ fn test_pox_missing_five_anchor_blocks() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4149,11 +4039,9 @@ fn test_pox_missing_five_anchor_blocks() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4163,7 +4051,7 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = 
get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -4182,7 +4070,7 @@ fn test_pox_missing_five_anchor_blocks() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -4191,7 +4079,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -4212,7 +4100,7 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4223,7 +4111,7 @@ fn test_pox_missing_five_anchor_blocks() { signal_mining_ready(miner_status[1].clone()); info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4236,7 +4124,7 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4253,7 +4141,7 @@ fn test_pox_missing_five_anchor_blocks() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ 
-4297,15 +4185,9 @@ fn test_sortition_divergence_pre_21() { epochs[3].start_height = 241; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -4352,7 +4234,7 @@ fn test_sortition_divergence_pre_21() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -4376,12 +4258,11 @@ fn test_sortition_divergence_pre_21() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -4393,8 +4274,8 @@ fn test_sortition_divergence_pre_21() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = 
Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4429,10 +4310,10 @@ fn test_sortition_divergence_pre_21() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4457,8 +4338,8 @@ fn test_sortition_divergence_pre_21() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4480,10 +4361,10 @@ fn test_sortition_divergence_pre_21() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); if tip_info.stacks_tip_height > 0 { @@ -4492,11 +4373,7 @@ fn test_sortition_divergence_pre_21() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4506,19 +4383,14 @@ fn test_sortition_divergence_pre_21() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - 
.to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4553,11 +4425,9 @@ fn test_sortition_divergence_pre_21() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { + for (cnt, tx) in stacking_txs.iter().enumerate() { eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4567,7 +4437,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -4586,7 +4456,7 @@ fn test_sortition_divergence_pre_21() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -4595,7 +4465,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -4616,7 +4486,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4645,7 +4515,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle 
##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4657,14 +4527,14 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -4678,7 +4548,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } } @@ -4690,7 +4560,7 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -4706,7 +4576,7 @@ fn test_sortition_divergence_pre_21() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } @@ -4722,7 +4592,7 @@ fn trait_invocation_cross_epoch() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; let 
impl_contract = @@ -4907,7 +4777,7 @@ fn trait_invocation_cross_epoch() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } - let interesting_txids = vec![ + let interesting_txids = [ invoke_txid.clone(), invoke_1_txid.clone(), invoke_2_txid.clone(), @@ -4988,21 +4858,13 @@ fn test_v1_unlock_height_with_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5200,12 +5062,10 @@ fn test_v1_unlock_height_with_current_stackers() { assert_eq!(addr_tuple, pox_addr_tuple_1); } } - } else { - if !burnchain_config.is_in_prepare_phase(height) { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - assert_eq!(addr_tuple, pox_addr_tuple_2); - } + } else if !burnchain_config.is_in_prepare_phase(height) { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + assert_eq!(addr_tuple, pox_addr_tuple_2); } } } @@ -5251,21 +5111,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( 
"03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5475,7 +5327,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { - if pox_addrs.len() > 0 { + if !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); for addr_tuple in pox_addrs { // can either pay to pox tuple 1, or burn @@ -5485,15 +5337,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { } } } - } else { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - // can either pay to pox tuple 2, or burn - assert_ne!(addr_tuple, pox_addr_tuple_1); - if addr_tuple == pox_addr_tuple_2 { - have_expected_payout = true; - } + } else if !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + // can either pay to pox tuple 2, or burn + assert_ne!(addr_tuple, pox_addr_tuple_1); + if addr_tuple == pox_addr_tuple_2 { + have_expected_payout = true; } } } diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 9bffca7c8a..774a83f712 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -58,7 +58,7 @@ fn disable_pox() { let epoch_2_2 = 255; // two blocks before next prepare phase. 
let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -92,31 +92,19 @@ fn disable_pox() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -397,9 +385,9 @@ fn disable_pox() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -420,37 +408,35 @@ fn disable_pox() { .unwrap(); debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if 
pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -522,7 +508,7 @@ fn disable_pox() { for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], + expected_slots[&reward_cycle][pox_addr], "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", &pox_addr, reward_cycle, @@ -544,8 +530,7 @@ fn disable_pox() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr - && 
parsed.auth.get_origin_nonce() == aborted_increase_nonce + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == aborted_increase_nonce { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, @@ -626,31 +611,19 @@ fn pox_2_unlock_all() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -892,7 +865,7 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` // will be included in that bitcoin block. // this will build the last block before 2.2 activates - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let tx = make_contract_call( &spender_sk, @@ -913,19 +886,19 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` // will be included in that bitcoin block. 
// this block activates 2.2 - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this *burn block* is when the unlock occurs - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // and this will mine the first block whose parent is the unlock block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance as u64, @@ -943,7 +916,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance as u64, - spender_2_initial_balance - stacked - (1 * tx_fee), + spender_2_initial_balance - stacked - tx_fee, "Spender 2 should still be locked" ); assert_eq!( @@ -957,13 +930,13 @@ fn pox_2_unlock_all() { // and this will mice the bitcoin block containing the first block whose parent has >= unlock burn block // (which is the criterion for the unlock) - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance, @@ -978,7 +951,7 @@ fn 
pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1001,16 +974,16 @@ fn pox_2_unlock_all() { submit_tx(&http_origin, &tx); // this wakes up the node to mine the transaction - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this block selects the previously mined block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); let spender_3_account = get_account(&http_origin, &spender_3_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_3_account.balance, 1_000_000, @@ -1038,7 +1011,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1080,9 +1053,9 @@ fn pox_2_unlock_all() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1103,37 +1076,35 @@ fn 
pox_2_unlock_all() { .unwrap(); debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1191,7 +1162,7 @@ fn pox_2_unlock_all() { for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], + expected_slots[&reward_cycle][pox_addr], "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", &pox_addr, reward_cycle, @@ -1215,7 +1186,7 @@ fn pox_2_unlock_all() { let parsed = StacksTransaction::consensus_deserialize(&mut 
tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_2_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1231,7 +1202,7 @@ fn pox_2_unlock_all() { assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); unlock_ht_22_tested = true; } - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1303,15 +1274,9 @@ fn test_pox_reorg_one_flap() { epochs.truncate(5); conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -1358,7 +1323,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -1379,12 +1344,11 @@ fn test_pox_reorg_one_flap() { let node_privkey_1 = StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let 
chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( "{}@{}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), @@ -1396,8 +1360,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -1432,10 +1396,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -1460,8 +1424,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -1483,10 +1447,10 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { + for (i, conf) in confs.iter().enumerate().skip(1) { eprintln!("\n\nBoot miner {}\n\n", i); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); if tip_info.stacks_tip_height > 0 { @@ -1495,11 +1459,7 @@ fn test_pox_reorg_one_flap() { } else { eprintln!("\n\nWaiting for miner {}...\n\n", i); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -1509,19 +1469,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -1533,7 +1488,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1556,11 +1511,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + 
eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -1570,7 +1523,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); if tip_info.burn_block_height == 220 { at_220 = true; @@ -1589,7 +1542,7 @@ fn test_pox_reorg_one_flap() { } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); assert!(tip_info.burn_block_height <= 220); } @@ -1598,7 +1551,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -1617,7 +1570,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1629,7 +1582,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } info!("####################### end of cycle ##############################"); @@ -1642,7 +1595,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1655,7 +1608,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle 
##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); // miner 1's history overtakes miner 0's. @@ -1671,7 +1624,7 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Tip for miner {}: {:?}", i, &tip_info); } @@ -1687,7 +1640,7 @@ fn test_pox_reorg_one_flap() { // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); + let tip_info = get_chain_info(c); info!("Final tip for miner {}: {:?}", i, &tip_info); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 2355f7521d..a0cbbfe876 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -52,7 +52,7 @@ fn trait_invocation_behavior() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let impl_contract_id = - QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + QualifiedContractIdentifier::new(contract_addr.into(), "impl-simple".into()); let mut spender_nonce = 0; let fee_amount = 10_000; @@ -526,7 +526,7 @@ fn trait_invocation_behavior() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr { + if tx_sender == spender_addr { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, // only interested in contract calls @@ -583,29 +583,27 @@ fn trait_invocation_behavior() { assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); } - for tx_nonce in [expected_good_23_3_nonce] { - assert_eq!( - 
transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-1" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_3_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - for tx_nonce in [expected_good_23_4_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-2" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_4_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { assert_eq!( diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 26ad007ca7..e39255678d 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -55,9 +55,9 @@ pub fn get_reward_set_entries_at_block( ) -> Result, Error> { state .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) - .and_then(|mut addrs| { + .map(|mut addrs| { addrs.sort_by_key(|k| k.reward_address.bytes()); - Ok(addrs) + addrs }) } @@ -86,7 +86,7 @@ fn fix_to_pox_contract() { let pox_3_activation_height = epoch_2_4; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); 
+ let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -110,31 +110,19 @@ fn fix_to_pox_contract() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -341,13 +329,13 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.2 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -367,7 +355,7 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.3 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to 2 
blocks before epoch 2.4 @@ -411,7 +399,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -431,7 +419,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -458,7 +446,7 @@ fn fix_to_pox_contract() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -492,9 +480,9 @@ fn fix_to_pox_contract() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -502,7 +490,7 @@ fn fix_to_pox_contract() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -514,38 +502,36 @@ fn fix_to_pox_contract() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -624,14 +610,12 @@ fn fix_to_pox_contract() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched 
with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -651,7 +635,7 @@ fn fix_to_pox_contract() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) { @@ -738,21 +722,13 @@ fn verify_auto_unlock_behavior() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ 
-765,11 +741,7 @@ fn verify_auto_unlock_behavior() { "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let pox_pubkey_3_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -879,14 +851,14 @@ fn verify_auto_unlock_behavior() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -908,7 +880,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -937,7 +909,7 @@ fn verify_auto_unlock_behavior() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -958,7 +930,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -979,7 +951,7 @@ fn verify_auto_unlock_behavior() { // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= 
epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1044,7 +1016,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -1064,7 +1036,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -1113,7 +1085,7 @@ fn verify_auto_unlock_behavior() { .unwrap(); assert_eq!(reward_set_entries.len(), 2); - info!("reward set entries: {:?}", reward_set_entries); + info!("reward set entries: {reward_set_entries:?}"); assert_eq!( reward_set_entries[0].reward_address.bytes(), pox_pubkey_2_stx_addr.bytes.0.to_vec() @@ -1141,7 +1113,7 @@ fn verify_auto_unlock_behavior() { &[Value::UInt(first_stacked_incr.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -1213,9 +1185,9 @@ fn verify_auto_unlock_behavior() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1223,7 +1195,7 @@ fn verify_auto_unlock_behavior() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1235,37 +1207,35 @@ fn verify_auto_unlock_behavior() { .expect_list() .unwrap(); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1340,14 +1310,12 @@ fn verify_auto_unlock_behavior() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + 
assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 6af1bee626..4a3e2a4095 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -79,8 +79,8 @@ fn microblocks_disabled() { conf.node.wait_time_for_blocks = 2_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); test_observer::register_any(&mut conf); @@ -111,8 +111,8 @@ fn microblocks_disabled() { 4 * prepare_phase_len / 5, 5, 15, - u64::max_value() - 2, - u64::max_value() - 1, + u64::MAX - 2, + u64::MAX - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 236d76b000..5a8de4d3bd 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -1,3 +1,4 @@ +use std::cmp::Ordering; use std::collections::HashMap; use std::fmt::Write; use std::sync::Mutex; @@ -43,7 +44,7 @@ use 
crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; -const OTHER_CONTRACT: &'static str = " +const OTHER_CONTRACT: &str = " (define-data-var x uint u0) (define-public (f1) (ok (var-get x))) @@ -51,14 +52,14 @@ const OTHER_CONTRACT: &'static str = " (ok (var-set x val))) "; -const CALL_READ_CONTRACT: &'static str = " +const CALL_READ_CONTRACT: &str = " (define-public (public-no-write) (ok (contract-call? .other f1))) (define-public (public-write) (ok (contract-call? .other f2 u5))) "; -const GET_INFO_CONTRACT: &'static str = " +const GET_INFO_CONTRACT: &str = " (define-map block-data { height: uint } { stacks-hash: (buff 32), @@ -143,7 +144,7 @@ const GET_INFO_CONTRACT: &'static str = " (fn-2 (uint) (response uint uint)))) "; -const IMPL_TRAIT_CONTRACT: &'static str = " +const IMPL_TRAIT_CONTRACT: &str = " ;; explicit trait compliance for trait 1 (impl-trait .get-info.trait-1) (define-private (test-height) burn-block-height) @@ -193,7 +194,7 @@ fn integration_test_get_info() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } run_loop @@ -279,10 +280,10 @@ fn integration_test_get_info() { let old_tip = StacksBlockId::new(&consensus_hash, &header_hash); use std::fs; use std::io::Write; - if fs::metadata(&tmppath).is_ok() { - fs::remove_file(&tmppath).unwrap(); + if fs::metadata(tmppath).is_ok() { + fs::remove_file(tmppath).unwrap(); } - let mut f = fs::File::create(&tmppath).unwrap(); + let mut f = fs::File::create(tmppath).unwrap(); f.write_all(&old_tip.serialize_to_vec()).unwrap(); } else if round == 2 { // block-height = 3 @@ -311,7 +312,7 @@ fn integration_test_get_info() { // block-height > 3 let tx = make_contract_call( &principal_sk, - (round - 3).into(), + round - 3, 10, CHAIN_ID_TESTNET, &to_addr(&contract_sk), @@ -337,7 +338,7 @@ fn integration_test_get_info() { if round >= 1 { let 
tx_xfer = make_stacks_transfer( &spender_sk, - (round - 1).into(), + round - 1, 10, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -356,16 +357,14 @@ fn integration_test_get_info() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state(|round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_addr = to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()); let contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "get-info")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.get-info")).unwrap(); let impl_trait_contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "impl-trait-contract")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.impl-trait-contract")).unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() @@ -374,7 +373,7 @@ fn integration_test_get_info() { match round { 1 => { // - Chain length should be 2. 
- let blocks = StacksChainState::list_blocks(&chain_state.db()).unwrap(); + let blocks = StacksChainState::list_blocks(chain_state.db()).unwrap(); assert!(chain_tip.metadata.stacks_block_height == 2); // Block #1 should have 5 txs @@ -382,14 +381,14 @@ fn integration_test_get_info() { let parent = chain_tip.block.header.parent_block; let bhh = &chain_tip.metadata.index_block_hash(); - eprintln!("Current Block: {} Parent Block: {}", bhh, parent); + eprintln!("Current Block: {bhh} Parent Block: {parent}"); let parent_val = Value::buff_from(parent.as_bytes().to_vec()).unwrap(); // find header metadata let mut headers = vec![]; for block in blocks.iter() { let header = StacksChainState::get_anchored_block_header_info(chain_state.db(), &block.0, &block.1).unwrap().unwrap(); - eprintln!("{}/{}: {:?}", &block.0, &block.1, &header); + eprintln!("{}/{}: {header:?}", &block.0, &block.1); headers.push(header); } @@ -500,13 +499,12 @@ fn integration_test_get_info() { burn_dbconn, bhh, &contract_identifier, "(exotic-data-checks u4)")); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/map_entry/{}/{}/{}", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -514,14 +512,14 @@ fn integration_test_get_info() { let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); assert_eq!(result_data, expected_data); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(100))]) 
.unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -532,19 +530,18 @@ fn integration_test_get_info() { let sender_addr = to_addr(&StacksPrivateKey::from_hex(SK_3).unwrap()); // now, let's use a query string to get data without a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=0", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=0"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_none()); + assert!(!res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -553,19 +550,18 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // now, let's use a query string to get data _with_ a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=1", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=1"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, 
bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -574,9 +570,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // account with a nonce entry + a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &sender_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{sender_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 99860); assert_eq!(res.nonce, 4); @@ -584,9 +579,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a nonce entry but not a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 960); assert_eq!(res.nonce, 4); @@ -594,9 +588,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a balance entry but not a nonce entry - let path = format!("{}/v2/accounts/{}", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -604,27 +597,24 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with neither! 
- let path = format!("{}/v2/accounts/{}.get-info", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}.get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 0); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_some()); assert!(res.balance_proof.is_some()); - let path = format!("{}/v2/accounts/{}?proof=0", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_none()); assert!(res.balance_proof.is_none()); - let path = format!("{}/v2/accounts/{}?proof=1", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=1"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -632,15 +622,15 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // let's try getting the transfer cost - let path = format!("{}/v2/fees/transfer", &http_origin); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/fees/transfer"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert!(res > 0); // let's get a contract ABI - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = 
client.get(&path).send().unwrap().json::().unwrap(); let contract_analysis = mem_type_check(GET_INFO_CONTRACT, ClarityVersion::Clarity2, StacksEpochId::Epoch21).unwrap().1; @@ -652,14 +642,14 @@ fn integration_test_get_info() { // a missing one? - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // let's get a contract SRC - let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -667,8 +657,8 @@ fn integration_test_get_info() { assert!(res.marf_proof.is_some()); - let path = format!("{}/v2/contracts/source/{}/{}?proof=0", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -677,14 +667,14 @@ fn integration_test_get_info() { // a missing one? - let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // how about a read-only function call! 
- let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -705,8 +695,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does not modify anything - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-no-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-no-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -732,8 +722,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does modify something and should fail - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -750,9 +740,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("NotReadOnly")); // let's try a call with a url-encoded string. 
- let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", - "get-exotic-data-info%3F"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info%3F"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -774,8 +763,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // let's have a runtime error! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -793,8 +782,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("UnwrapFailure")); // let's have a runtime error! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "update-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/update-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -814,13 +803,13 @@ fn integration_test_get_info() { // let's submit a valid transaction! 
let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (valid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (valid)"); // tx_xfer is 180 bytes long let tx_xfer = make_stacks_transfer( &spender_sk, - round.into(), + round, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -846,17 +835,17 @@ fn integration_test_get_info() { .send() .unwrap().json::().unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // let's submit an invalid transaction! - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (invalid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (invalid)"); // tx_xfer_invalid is 180 bytes long // bad nonce - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -869,39 +858,39 @@ fn integration_test_get_info() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("txid").unwrap().as_str().unwrap(), format!("{}", tx_xfer_invalid_tx.txid())); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // testing /v2/trait// // trait does not exist - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "get-info", &contract_addr, "get-info", "dummy-trait"); - eprintln!("Test: GET {}", path); + let path = 
format!("{http_origin}/v2/traits/{contract_addr}/get-info/{contract_addr}/get-info/dummy-trait"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // explicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); eprintln!("Test: GET {}", path); assert!(res.is_implemented); // No trait found - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-4"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-4"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // implicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-2"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-2"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // invalid trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-3"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-3"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(!res.is_implemented); // test query parameters for v2/trait endpoint @@ -911,33 +900,33 @@ fn 
integration_test_get_info() { let tmppath = "/tmp/integration_test_get_info-old-tip"; use std::fs; use std::io::Read; - let mut f = fs::File::open(&tmppath).unwrap(); + let mut f = fs::File::open(tmppath).unwrap(); let mut buf = vec![]; f.read_to_end(&mut buf).unwrap(); let old_tip = StacksBlockId::consensus_deserialize(&mut &buf[..]).unwrap(); - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip={}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1", &old_tip); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip={old_tip}"); let res = client.get(&path).send().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); // evaluate check for explicit compliance where tip is the chain tip of the first block (contract DNE at that block), but tip is "latest" - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=latest", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip=latest"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // perform some tests of the fee rate interface - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = - TransactionPayload::TokenTransfer(contract_addr.clone().into(), 10_000_000, TokenTransferMemo([0; 34])); + TransactionPayload::TokenTransfer(contract_addr.into(), 10_000_000, TokenTransferMemo([0; 34])); let payload_data = tx_payload.serialize_to_vec(); let payload_hex = format!("0x{}", to_hex(&payload_data)); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); 
let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -948,7 +937,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be 0 -- their cost is just in their length @@ -975,11 +964,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -988,7 +977,7 @@ fn integration_test_get_info() { let payload_data = tx_payload.serialize_to_vec(); let payload_hex = to_hex(&payload_data); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -999,7 +988,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1026,11 +1015,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - 
address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -1041,7 +1030,7 @@ fn integration_test_get_info() { let estimated_len = 1550; let body = json!({ "transaction_payload": payload_hex.clone(), "estimated_len": estimated_len }); - info!("POST body\n {}", body); + info!("POST body\n {body}"); let res = client.post(&path) .json(&body) @@ -1050,7 +1039,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - info!("{}", res); + info!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1094,7 +1083,7 @@ fn integration_test_get_info() { run_loop.start(num_rounds).unwrap(); } -const FAUCET_CONTRACT: &'static str = " +const FAUCET_CONTRACT: &str = " (define-public (spout) (let ((recipient tx-sender)) (print (as-contract (stx-transfer? u1 .faucet recipient))))) @@ -1111,7 +1100,7 @@ fn contract_stx_transfer() { conf.burnchain.commit_anchor_block_within = 5000; conf.add_initial_balance(addr_3.to_string(), 100000); conf.add_initial_balance( - to_addr(&StacksPrivateKey::from_hex(&SK_2).unwrap()).to_string(), + to_addr(&StacksPrivateKey::from_hex(SK_2).unwrap()).to_string(), 1000, ); conf.add_initial_balance(to_addr(&contract_sk).to_string(), 1000); @@ -1133,9 +1122,8 @@ fn contract_stx_transfer() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1226,7 +1214,7 @@ fn contract_stx_transfer() { .submit_raw( &mut chainstate_copy, &sortdb, - &consensus_hash, + consensus_hash, &header_hash, tx, &ExecutionCost::max_value(), @@ -1287,30 +1275,27 @@ fn contract_stx_transfer() { .unwrap_err() { MemPoolRejection::ConflictingNonceInMempool => (), - e => 
panic!("{:?}", e), + e => panic!("{e:?}"), }; } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); match round { 1 => { - assert!(chain_tip.metadata.stacks_block_height == 2); + assert_eq!(chain_tip.metadata.stacks_block_height, 2); // Block #1 should have 2 txs -- coinbase + transfer assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1353,19 +1338,19 @@ fn contract_stx_transfer() { ); } 2 => { - assert!(chain_tip.metadata.stacks_block_height == 3); + assert_eq!(chain_tip.metadata.stacks_block_height, 3); // Block #2 should have 2 txs -- coinbase + publish assert_eq!(chain_tip.block.txs.len(), 2); } 3 => { - assert!(chain_tip.metadata.stacks_block_height == 4); + assert_eq!(chain_tip.metadata.stacks_block_height, 4); // Block #3 should have 2 txs -- coinbase + contract-call, // the second publish _should have been rejected_ assert_eq!(chain_tip.block.txs.len(), 2); // check that 1 stx was transfered to SK_2 via the contract-call let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1408,7 +1393,7 @@ fn contract_stx_transfer() { ); } 4 => { - assert!(chain_tip.metadata.stacks_block_height == 5); + assert_eq!(chain_tip.metadata.stacks_block_height, 5); assert_eq!( chain_tip.block.txs.len() as u64, MAXIMUM_MEMPOOL_TX_CHAINING + 1, @@ -1416,7 +1401,7 @@ fn contract_stx_transfer() { ); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + 
chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1491,9 +1476,8 @@ fn mine_transactions_out_of_order() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1578,16 +1562,13 @@ fn mine_transactions_out_of_order() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1610,7 +1591,7 @@ fn mine_transactions_out_of_order() { // check that 1000 stx _was_ transfered to the contract principal let curr_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); assert_eq!( @@ -1698,15 +1679,14 @@ fn mine_contract_twice() { run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); if round == 2 { let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that the contract published! 
@@ -1761,9 +1741,8 @@ fn bad_contract_tx_rollback() { let addr_2 = to_addr(&sk_2); let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1872,16 +1851,13 @@ fn bad_contract_tx_rollback() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1892,7 +1868,7 @@ fn bad_contract_tx_rollback() { assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1967,10 +1943,8 @@ fn make_expensive_contract(inner_loop: &str, other_decl: &str) -> String { for i in 0..10 { contract.push('\n'); contract.push_str(&format!( - "(define-constant list-{} (concat list-{} list-{}))", + "(define-constant list-{} (concat list-{i} list-{i}))", i + 1, - i, - i )); } @@ -2083,7 +2057,7 @@ fn block_limit_runtime_test() { let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; let spender_sks = make_keys(seed, 500); for sk in spender_sks.iter() { - conf.add_initial_balance(to_addr(&sk).to_string(), 1000); + conf.add_initial_balance(to_addr(sk).to_string(), 1000); } let num_rounds = 6; @@ -2097,9 +2071,8 @@ fn block_limit_runtime_test() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + 
to_addr(&contract_sk) )) .unwrap(); let (consensus_hash, block_hash) = ( @@ -2107,45 +2080,15 @@ fn block_limit_runtime_test() { &tenure.parent_block.metadata.anchored_header.block_hash(), ); - if round == 1 { - let publish_tx = make_contract_publish( - &contract_sk, - 0, - 10, - CHAIN_ID_TESTNET, - "hello-contract", - EXPENSIVE_CONTRACT.as_str(), - ); - tenure - .mem_pool - .submit_raw( - &mut chainstate_copy, - &sortdb, - consensus_hash, - block_hash, - publish_tx, - &ExecutionCost::max_value(), - &StacksEpochId::Epoch21, - ) - .unwrap(); - } else if round > 1 { - eprintln!("Begin Round: {}", round); - let to_submit = 2 * (round - 1); - - let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; - let spender_sks = make_keys(seed, 500); - - for i in 0..to_submit { - let sk = &spender_sks[(i + round * round) as usize]; - let tx = make_contract_call( - sk, + match round.cmp(&1) { + Ordering::Equal => { + let publish_tx = make_contract_publish( + &contract_sk, 0, 10, CHAIN_ID_TESTNET, - &to_addr(&contract_sk), "hello-contract", - "do-it", - &[], + EXPENSIVE_CONTRACT.as_str(), ); tenure .mem_pool @@ -2154,24 +2097,55 @@ fn block_limit_runtime_test() { &sortdb, consensus_hash, block_hash, - tx, + publish_tx, &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ) .unwrap(); } - } - - return; + Ordering::Greater => { + eprintln!("Begin Round: {round}"); + let to_submit = 2 * (round - 1); + + let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; + let spender_sks = make_keys(seed, 500); + + for i in 0..to_submit { + let sk = &spender_sks[(i + round * round) as usize]; + let tx = make_contract_call( + sk, + 0, + 10, + CHAIN_ID_TESTNET, + &to_addr(&contract_sk), + "hello-contract", + "do-it", + &[], + ); + tenure + .mem_pool + .submit_raw( + &mut chainstate_copy, + &sortdb, + consensus_hash, + block_hash, + tx, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch21, + ) + .unwrap(); + } + } + Ordering::Less => {} + }; }); 
run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); @@ -2180,7 +2154,7 @@ fn block_limit_runtime_test() { // Block #1 should have 3 txs -- coinbase + 2 contract calls... assert_eq!(block.block.txs.len(), 3); } - 3 | 4 | 5 => { + 3..=5 => { // Block >= 2 should have 4 txs -- coinbase + 3 contract calls // because the _subsequent_ transactions should never have been // included. @@ -2215,7 +2189,7 @@ fn mempool_errors() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } let mut run_loop = RunLoop::new(conf); @@ -2254,22 +2228,19 @@ fn mempool_errors() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, _block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() }; let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let spender_addr = to_addr(&spender_sk); @@ -2277,7 +2248,7 @@ fn mempool_errors() { if round == 1 { // let's submit an invalid transaction! 
- eprintln!("Test: POST {} (invalid)", path); + eprintln!("Test: POST {path} (invalid)"); let tx_xfer_invalid = make_stacks_transfer( &spender_sk, 30, // bad nonce -- too much chaining @@ -2298,7 +2269,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2312,7 +2283,7 @@ fn mempool_errors() { "TooMuchChaining" ); let data = res.get("reason_data").unwrap(); - assert_eq!(data.get("is_origin").unwrap().as_bool().unwrap(), true); + assert!(data.get("is_origin").unwrap().as_bool().unwrap()); assert_eq!( data.get("principal").unwrap().as_str().unwrap(), &spender_addr.to_string() @@ -2340,7 +2311,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2374,7 +2345,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2419,7 +2390,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index b701e70a15..58a526ba30 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -31,13 +31,13 @@ use super::{ use crate::helium::RunLoop; use crate::Keychain; -const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) (define-public (bar (x uint)) (ok x))"; -const TRAIT_CONTRACT: &'static str = "(define-trait tr ((value () (response uint uint))))"; -const USE_TRAIT_CONTRACT: &'static str = "(use-trait tr-trait .trait-contract.tr) +const 
TRAIT_CONTRACT: &str = "(define-trait tr ((value () (response uint uint))))"; +const USE_TRAIT_CONTRACT: &str = "(use-trait tr-trait .trait-contract.tr) (define-public (baz (abc )) (ok (contract-of abc)))"; -const IMPLEMENT_TRAIT_CONTRACT: &'static str = "(define-public (value) (ok u1))"; -const BAD_TRAIT_CONTRACT: &'static str = "(define-public (foo-bar) (ok u1))"; +const IMPLEMENT_TRAIT_CONTRACT: &str = "(define-public (value) (ok u1))"; +const BAD_TRAIT_CONTRACT: &str = "(define-public (foo-bar) (ok u1))"; pub fn make_bad_stacks_transfer( sender: &StacksPrivateKey, @@ -318,17 +318,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!( - if let MemPoolRejection::FailedToValidate(ChainstateError::NetError( - NetError::VerifyingError(_), - )) = e - { - true - } else { - false - } - ); + eprintln!("Err: {e:?}"); + assert!(matches!( + e, + MemPoolRejection::FailedToValidate(ChainstateError::NetError( + NetError::VerifyingError(_) + )) + )); // mismatched network on contract-call! let bad_addr = StacksAddress::from_public_keys( @@ -337,8 +333,7 @@ fn mempool_setup_chainstate() { 1, &vec![StacksPublicKey::from_private(&other_sk)], ) - .unwrap() - .into(); + .unwrap(); let tx_bytes = make_contract_call( &contract_sk, @@ -362,11 +357,7 @@ fn mempool_setup_chainstate() { ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // mismatched network on transfer! 
let bad_addr = StacksAddress::from_public_keys( @@ -391,11 +382,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // bad fees let tx_bytes = @@ -411,12 +398,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _))); // bad nonce let tx_bytes = @@ -432,12 +415,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadNonces(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadNonces(_))); // not enough funds let tx_bytes = make_stacks_transfer( @@ -459,15 +438,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); // sender == recipient - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let tx_bytes = make_stacks_transfer( &contract_sk, 5, @@ -487,7 +462,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { r == contract_princ } else { @@ -517,15 +492,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + 
eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // tx version must be testnet - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let payload = TransactionPayload::TokenTransfer( contract_princ.clone(), 1000, @@ -551,12 +522,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadTransactionVersion = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadTransactionVersion)); // send amount must be positive let tx_bytes = @@ -572,12 +539,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::TransferAmountMustBePositive)); // not enough funds let tx_bytes = make_stacks_transfer( @@ -599,12 +562,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); let tx_bytes = make_stacks_transfer( &contract_sk, @@ -625,12 +584,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(100700, 99500))); let tx_bytes = make_contract_call( &contract_sk, @@ -653,12 +608,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchContract = e { - true - } 
else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchContract)); let tx_bytes = make_contract_call( &contract_sk, @@ -681,12 +632,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchPublicFunction = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchPublicFunction)); let tx_bytes = make_contract_call( &contract_sk, @@ -709,12 +656,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_))); let tx_bytes = make_contract_publish( &contract_sk, @@ -735,12 +678,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::ContractAlreadyExists(_))); let microblock_1 = StacksMicroblockHeader { version: 0, @@ -777,13 +716,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let microblock_1 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), signature: MessageSignature([0; 65]), }; @@ -791,7 +730,7 @@ fn mempool_setup_chainstate() { let microblock_2 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[1, 2, 3]), signature: MessageSignature([0; 65]), }; @@ -815,7 +754,7 @@ fn mempool_setup_chainstate() { 
tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let mut microblock_1 = StacksMicroblockHeader { @@ -856,7 +795,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET); @@ -871,12 +810,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoCoinbaseViaMempool)); // find the correct priv-key let mut secret_key = None; @@ -936,12 +871,12 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let contract_id = QualifiedContractIdentifier::new( - StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("implement-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("implement-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -968,8 +903,8 @@ fn mempool_setup_chainstate() { .unwrap(); let contract_id = QualifiedContractIdentifier::new( - StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("bad-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("bad-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -994,11 +929,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + assert!(matches!(e, 
MemPoolRejection::BadFunctionArgument(_))); } }, ); diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 2c555e7232..6f02ecf138 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -81,11 +81,11 @@ pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) } (ok true)))"#; // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar -pub const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; -pub const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; -pub const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; -pub const ADDR_4: &'static str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; +pub const ADDR_4: &str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; lazy_static! 
{ pub static ref PUBLISH_CONTRACT: Vec = make_contract_publish( @@ -133,6 +133,7 @@ pub fn insert_new_port(port: u16) -> bool { ports.insert(port) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -215,6 +216,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( ) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -401,10 +403,10 @@ pub fn set_random_binds(config: &mut Config) { let rpc_port = gen_random_port(); let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); - config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); - config.node.data_url = format!("http://{}:{}", localhost, rpc_port); - config.node.p2p_address = format!("{}:{}", localhost, p2p_port); + config.node.rpc_bind = format!("{localhost}:{rpc_port}"); + config.node.p2p_bind = format!("{localhost}:{p2p_port}"); + config.node.data_url = format!("http://{localhost}:{rpc_port}"); + config.node.p2p_address = format!("{localhost}:{p2p_port}"); } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { @@ -427,9 +429,10 @@ pub fn make_stacks_transfer( ) -> Vec { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_sponsored_stacks_transfer_on_testnet( sender: &StacksPrivateKey, payer: &StacksPrivateKey, @@ -443,7 +446,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_sponsored_sig_tx_anchor_mode_version( - payload.into(), + 
payload, sender, payer, sender_nonce, @@ -466,7 +469,7 @@ pub fn make_stacks_transfer_mblock_only( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), + payload, sender, nonce, tx_fee, @@ -484,14 +487,15 @@ pub fn make_poison( header_2: StacksMicroblockHeader, ) -> Vec { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec { let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_contract_call( sender: &StacksPrivateKey, nonce: u64, @@ -506,15 +510,16 @@ pub fn make_contract_call( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), }; serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_contract_call_mblock_only( sender: &StacksPrivateKey, nonce: u64, @@ -529,10 +534,10 @@ pub fn make_contract_call_mblock_only( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), 
}; serialize_sign_standard_single_sig_tx_anchor_mode( @@ -558,7 +563,7 @@ fn make_microblock( let mut microblock_builder = StacksMicroblockBuilder::new( block.block_hash(), - consensus_hash.clone(), + consensus_hash, chainstate, burn_dbconn, BlockBuilderSettings::max_value(), @@ -576,10 +581,9 @@ fn make_microblock( // NOTE: we intentionally do not check the block's microblock pubkey hash against the private // key, because we may need to test that microblocks get rejected due to bad signatures. - let microblock = microblock_builder + microblock_builder .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap(); - microblock + .unwrap() } /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that @@ -601,7 +605,7 @@ pub fn select_transactions_where( } } - return result; + result } /// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController` @@ -614,20 +618,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", - get_epoch_time_secs(), - current_height + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", + get_epoch_time_secs() ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); current_height = tip_info.burn_block_height; } @@ -717,7 +720,6 @@ fn should_succeed_mining_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -743,18 +745,18 @@ fn 
should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); // 0 event should have been produced let events: Vec = chain_tip @@ -762,7 +764,7 @@ fn should_succeed_mining_valid_txs() { .iter() .flat_map(|a| a.events.clone()) .collect(); - assert!(events.len() == 0); + assert!(events.is_empty()); } 2 => { // Inspecting the chain at round 2. @@ -775,18 +777,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) 
+ )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events should have been produced let events: Vec = chain_tip @@ -807,18 +809,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the set-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events + 1 contract event should have been produced let events: Vec = chain_tip @@ -832,7 +834,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Setting key foo\"".to_string() + && format!("{}", data.value) == "\"Setting key foo\"" } _ => false, }); @@ -848,18 +850,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) 
+ )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 1 event should have been produced let events: Vec = chain_tip @@ -873,7 +875,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Getting key foo\"".to_string() + && format!("{}", data.value) == "\"Getting key foo\"" } _ => false, }); @@ -889,19 +891,19 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the STX transfer let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::TokenTransfer(_, _, _) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::TokenTransfer(_, _, _) + )); // 1 event should have been produced let events: Vec = chain_tip @@ -996,7 +998,6 @@ fn should_succeed_handling_malformed_and_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -1014,10 +1015,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) 
=> true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 1 => { // Inspecting the chain at round 1. @@ -1030,18 +1031,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); } 2 => { // Inspecting the chain at round 2. @@ -1054,10 +1055,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 3 => { // Inspecting the chain at round 3. @@ -1070,10 +1071,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 4 => { // Inspecting the chain at round 4. 
@@ -1086,18 +1087,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); } _ => {} } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index b5140a06ee..3e9f235424 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -200,9 +200,7 @@ impl TestSigningChannel { /// TODO: update to use signatures vec pub fn get_signature() -> Option> { let mut signer = TEST_SIGNING.lock().unwrap(); - let Some(sign_channels) = signer.as_mut() else { - return None; - }; + let sign_channels = signer.as_mut()?; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. 
let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); @@ -362,7 +360,7 @@ pub fn blind_signer_multinode( thread::sleep(Duration::from_secs(2)); info!("Checking for a block proposal to sign..."); last_count = cur_count; - let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + let configs: Vec<&Config> = configs.iter().collect(); match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { @@ -427,10 +425,12 @@ pub fn get_latest_block_proposal( .collect(); proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { - if block_a.header.chain_length > block_b.header.chain_length { - return std::cmp::Ordering::Greater; - } else if block_a.header.chain_length < block_b.header.chain_length { - return std::cmp::Ordering::Less; + let res = block_a + .header + .chain_length + .cmp(&block_b.header.chain_length); + if res != std::cmp::Ordering::Equal { + return res; } // the heights are tied, tie break with the latest miner if *is_latest_a { @@ -439,7 +439,7 @@ pub fn get_latest_block_proposal( if *is_latest_b { return std::cmp::Ordering::Less; } - return std::cmp::Ordering::Equal; + std::cmp::Ordering::Equal }); for (b, _, is_latest) in proposed_blocks.iter() { @@ -542,7 +542,7 @@ pub fn read_and_sign_block_proposal( channel .send(proposed_block.header.signer_signature) .unwrap(); - return Ok(signer_sig_hash); + Ok(signer_sig_hash) } /// Return a working nakamoto-neon config and the miner's bitcoin address to fund @@ -585,12 +585,12 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress burnchain.peer_host = Some("127.0.0.1".to_string()); } - conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); + conf.burnchain.magic_bytes = MagicBytes::from([b'T', b'3'].as_ref()); conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; - conf.miner.first_attempt_time_ms = 
i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; // if there's just one node, then this must be true for tests to pass conf.miner.wait_for_block_download = false; @@ -709,7 +709,7 @@ pub fn next_block_and_wait_for_commits( coord_channels: &[&Arc>], commits_submitted: &[&Arc], ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect(); + let commits_submitted: Vec<_> = commits_submitted.to_vec(); let blocks_processed_before: Vec<_> = coord_channels .iter() .map(|x| { @@ -786,7 +786,7 @@ pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { let stacker_sk = Secp256k1PrivateKey::new(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( - PrincipalData::from(stacker_address.clone()).to_string(), + PrincipalData::from(stacker_address).to_string(), POX_4_DEFAULT_STACKER_BALANCE, ); stacker_sk @@ -813,17 +813,17 @@ pub fn boot_to_epoch_3( "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out waiting for the stacks height to increment") } - let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -840,13 +840,13 @@ pub fn 
boot_to_epoch_3( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -860,7 +860,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -900,9 +900,9 @@ pub fn boot_to_epoch_3( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -943,9 +943,9 @@ pub fn boot_to_epoch_3( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 1, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); @@ -975,17 +975,17 @@ pub fn boot_to_pre_epoch_3_boundary( "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out 
waiting for the stacks height to increment") } - let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -1002,13 +1002,13 @@ pub fn boot_to_pre_epoch_3_boundary( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1022,7 +1022,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1062,9 +1062,9 @@ pub fn boot_to_pre_epoch_3_boundary( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -1105,9 +1105,9 @@ pub fn boot_to_pre_epoch_3_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 2, - &naka_conf, + naka_conf, ); info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); @@ -1191,7 +1191,7 @@ pub fn is_key_set_for_cycle( is_mainnet: bool, http_origin: &str, ) -> Result { - let key = get_key_for_cycle(reward_cycle, is_mainnet, &http_origin)?; + let key = get_key_for_cycle(reward_cycle, is_mainnet, http_origin)?; Ok(key.is_some()) } @@ -1218,10 +1218,10 @@ pub fn setup_epoch_3_reward_set( let epoch_3_reward_cycle_boundary = 
epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -1241,13 +1241,13 @@ pub fn setup_epoch_3_reward_set( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1260,7 +1260,7 @@ pub fn setup_epoch_3_reward_set( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1322,9 +1322,9 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3_reward_set_calculation_boundary, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); @@ -1364,9 +1364,9 @@ pub fn boot_to_epoch_25( ); run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_25_start_height, - &naka_conf, + naka_conf, ); 
info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); } @@ -1391,7 +1391,7 @@ pub fn boot_to_epoch_3_reward_set( btc_regtest_controller, num_stacking_cycles, ); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); info!( "Bootstrapped to Epoch 3.0 reward set calculation height: {}", get_chain_info(naka_conf).burn_block_height @@ -1426,7 +1426,7 @@ fn simple_neon_integration() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); let sender_sk = Secp256k1PrivateKey::new(); @@ -1435,16 +1435,13 @@ fn simple_neon_integration() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1502,7 +1499,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1569,8 +1566,7 @@ fn 
simple_neon_integration() { .as_array() .unwrap() .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) }); Ok(transfer_tx_included) }) @@ -1598,17 +1594,13 @@ fn simple_neon_integration() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1626,7 +1618,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1676,7 +1668,7 @@ fn flash_blocks_on_epoch_3() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -1685,16 +1677,13 @@ fn flash_blocks_on_epoch_3() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = 
TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1852,17 +1841,13 @@ fn flash_blocks_on_epoch_3() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1898,18 +1883,13 @@ fn flash_blocks_on_epoch_3() { // Verify that there's a gap of AT LEAST 3 blocks assert!( gap_end - gap_start + 1 >= 3, - "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {} to {}", - gap_start, - gap_end + "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {gap_start} to {gap_end}" ); // Verify that the gap includes the Epoch 3.0 activation height assert!( gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, - "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", - gap_start, - gap_end, - epoch_3_start_height + "Expected the gap ({gap_start}..={gap_end}) to include the Epoch 3.0 activation height ({epoch_3_start_height})" ); // Verify blocks before and after the gap @@ -1918,7 +1898,7 @@ fn flash_blocks_on_epoch_3() { check_nakamoto_empty_block_heuristics(); info!("Verified burn block ranges, 
including expected gap for flash blocks"); - info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {epoch_3_start_height}"); coord_channel .lock() @@ -1957,13 +1937,10 @@ fn mine_multiple_per_tenure_integration() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1993,7 +1970,7 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2028,7 +2005,7 @@ fn mine_multiple_per_tenure_integration() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("Mining tenure {}", tenure_ix); + debug!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2145,22 +2122,19 @@ fn multiple_miners() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - 
PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut conf_node_2 = naka_conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = vec![2, 2, 2, 2]; conf_node_2.burnchain.local_mining_public_key = Some( Keychain::default(conf_node_2.node.seed.clone()) @@ -2175,7 +2149,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), @@ -2243,7 +2217,7 @@ fn multiple_miners() { .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2286,7 +2260,7 @@ fn multiple_miners() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure 
{tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2386,9 +2360,9 @@ fn correct_burn_outs() { { let epochs = naka_conf.burnchain.epochs.as_mut().unwrap(); - let epoch_24_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch24).unwrap(); - let epoch_25_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap(); - let epoch_30_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); + let epoch_24_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch24).unwrap(); + let epoch_25_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch25).unwrap(); + let epoch_30_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch30).unwrap(); epochs[epoch_24_ix].end_height = 208; epochs[epoch_25_ix].start_height = 208; epochs[epoch_25_ix].end_height = 225; @@ -2411,10 +2385,7 @@ fn correct_burn_outs() { let stacker_accounts = accounts[0..3].to_vec(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let signers = TestSigners::new(vec![sender_signer_sk]); @@ -2503,7 +2474,7 @@ fn correct_burn_outs() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&account.0).bytes, + tests::to_addr(account.0).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); @@ -2524,7 +2495,7 @@ fn correct_burn_outs() { .to_rsv(); let stacking_tx = tests::make_contract_call( - &account.0, + account.0, account.2.nonce, 1000, naka_conf.burnchain.chain_id, @@ -2586,7 +2557,7 @@ fn correct_burn_outs() { .block_height_to_reward_cycle(epoch_3.start_height) 
.unwrap(); - info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle); + info!("first_epoch_3_cycle: {first_epoch_3_cycle:?}"); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap(); @@ -2732,10 +2703,7 @@ fn block_proposal_api_endpoint() { let stacker_sk = setup_stacker(&mut conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); // only subscribe to the block proposal events test_observer::spawn(); @@ -2760,7 +2728,7 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -2814,7 +2782,7 @@ fn block_proposal_api_endpoint() { .unwrap() .unwrap(); - let privk = conf.miner.mining_key.unwrap().clone(); + let privk = conf.miner.mining_key.unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()) .expect("Failed to get sortition tip"); let db_handle = sortdb.index_handle(&sort_tip); @@ -2910,41 +2878,41 @@ fn block_proposal_api_endpoint() { ("Must wait", sign(&proposal), HTTP_TOO_MANY, None), ( "Non-canonical or absent tenure", - (|| { + { let mut sp = sign(&proposal); sp.block.header.consensus_hash.0[3] ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::NonCanonicalTenure)), ), ( "Corrupted (bit flipped after signing)", - (|| { + { let mut sp = sign(&proposal); sp.block.header.timestamp ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), ( "Invalid 
`chain_id`", - (|| { + { let mut p = proposal.clone(); p.chain_id ^= 0xFFFFFFFF; sign(&p) - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::InvalidBlock)), ), ( "Invalid `miner_signature`", - (|| { + { let mut sp = sign(&proposal); sp.block.header.miner_signature.0[1] ^= 0x80; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), @@ -3042,10 +3010,7 @@ fn block_proposal_api_endpoint() { .iter() .zip(proposal_responses.iter()) { - info!( - "Received response {:?}, expecting {:?}", - &response, &expected_response - ); + info!("Received response {response:?}, expecting {expected_response:?}"); match expected_response { Ok(_) => { assert!(matches!(response, BlockValidateResponse::Ok(_))); @@ -3093,19 +3058,16 @@ fn miner_writes_proposed_block_to_stackerdb() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let stacker_sk = setup_stacker(&mut naka_conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); test_observer::spawn(); test_observer::register( @@ -3216,9 +3178,9 @@ fn vote_for_aggregate_key_burn_op() { let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); - naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let 
stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3277,7 +3239,7 @@ fn vote_for_aggregate_key_burn_op() { let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr.clone(), + output: signer_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3343,13 +3305,13 @@ fn vote_for_aggregate_key_burn_op() { let stacker_pk = StacksPublicKey::from_private(&stacker_sk); let signer_key: StacksPublicKeyBuffer = stacker_pk.to_bytes_compressed().as_slice().into(); - let aggregate_key = signer_key.clone(); + let aggregate_key = signer_key; let vote_for_aggregate_key_op = BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: signer_addr.clone(), + sender: signer_addr, round: 0, reward_cycle, aggregate_key, @@ -3360,7 +3322,7 @@ fn vote_for_aggregate_key_burn_op() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3393,10 +3355,10 @@ fn vote_for_aggregate_key_burn_op() { for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if !burnchain_op.contains_key("vote_for_aggregate_key") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap(); @@ -3446,7 +3408,7 @@ fn follower_bootup() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = 
tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3455,13 +3417,10 @@ fn follower_bootup() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -3574,7 +3533,7 @@ fn follower_bootup() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -3582,10 +3541,7 @@ fn follower_bootup() { let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -3621,8 +3577,8 @@ fn follower_bootup() { let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner 
sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -3773,7 +3729,7 @@ fn follower_bootup_across_multiple_cycles() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3782,13 +3738,10 @@ fn follower_bootup_across_multiple_cycles() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3972,7 +3925,7 @@ fn follower_bootup_custom_chain_id() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers @@ -3981,13 +3934,10 @@ fn follower_bootup_custom_chain_id() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - 
PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -4100,7 +4050,7 @@ fn follower_bootup_custom_chain_id() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -4108,10 +4058,7 @@ fn follower_bootup_custom_chain_id() { let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -4147,8 +4094,8 @@ fn follower_bootup_custom_chain_id() { let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -4326,23 +4273,14 @@ fn burn_ops_integration_test() { let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; - let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let mut signers = TestSigners::new(vec![signer_sk_1]); let stacker_sk = setup_stacker(&mut naka_conf); // Add the initial balances to the other accounts - naka_conf.add_initial_balance( - PrincipalData::from(stacker_addr_1.clone()).to_string(), - 
1000000, - ); - naka_conf.add_initial_balance( - PrincipalData::from(stacker_addr_2.clone()).to_string(), - 1000000, - ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 100_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_1).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_2).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 100_000_000); test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -4395,7 +4333,7 @@ fn burn_ops_integration_test() { info!("Submitting first pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr_1.clone(), + output: signer_addr_1, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4426,7 +4364,7 @@ fn burn_ops_integration_test() { let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); let pre_stx_op_2 = PreStxOp { - output: signer_addr_2.clone(), + output: signer_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4448,7 +4386,7 @@ fn burn_ops_integration_test() { let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting third pre-stx op"); let pre_stx_op_3 = PreStxOp { - output: stacker_addr_1.clone(), + output: stacker_addr_1, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4469,7 +4407,7 @@ fn burn_ops_integration_test() { info!("Submitting fourth pre-stx op"); let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_4 = PreStxOp { - output: stacker_addr_2.clone(), + output: stacker_addr_2, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4566,10 +4504,10 @@ fn burn_ops_integration_test() { "reward_cycle" => reward_cycle, ); - let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); - let mut 
signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); - let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false); - let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false); + let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); + let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); + let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1, false); + let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2, false); info!( "Before stack-stx op, signer 1 total: {}", @@ -4603,8 +4541,8 @@ fn burn_ops_integration_test() { info!("Submitting transfer STX op"); let transfer_stx_op = TransferStxOp { - sender: stacker_addr_1.clone(), - recipient: stacker_addr_2.clone(), + sender: stacker_addr_1, + recipient: stacker_addr_2, transfered_ustx: 10000, memo: vec![], txid: Txid([0u8; 32]), @@ -4626,8 +4564,8 @@ fn burn_ops_integration_test() { info!("Submitting delegate STX op"); let del_stx_op = DelegateStxOp { - sender: stacker_addr_2.clone(), - delegate_to: stacker_addr_1.clone(), + sender: stacker_addr_2, + delegate_to: stacker_addr_1, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -4654,7 +4592,7 @@ fn burn_ops_integration_test() { let min_stx = pox_info.next_cycle.min_threshold_ustx; let stack_stx_op_with_some_signer_key = StackStxOp { - sender: signer_addr_1.clone(), + sender: signer_addr_1, reward_addr: pox_addr, stacked_ustx: min_stx.into(), num_cycles: lock_period, @@ -4681,7 +4619,7 @@ fn burn_ops_integration_test() { ); let stack_stx_op_with_no_signer_key = StackStxOp { - sender: signer_addr_2.clone(), + sender: signer_addr_2, reward_addr: PoxAddress::Standard(signer_addr_2, None), stacked_ustx: 100000, num_cycles: 6, @@ -4766,7 +4704,7 @@ fn burn_ops_integration_test() { for tx in transactions.iter().rev() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - 
info!("Found a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); assert!(block_has_tenure_change, "Block should have a tenure change"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if burnchain_op.contains_key("transfer_stx") { @@ -4784,15 +4722,14 @@ fn burn_ops_integration_test() { assert_eq!(recipient, stacker_addr_2.to_string()); assert_eq!(transfered_ustx, 10000); info!( - "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", - sender, recipient, transfered_ustx + "Transfer STX op: sender: {sender}, recipient: {recipient}, transfered_ustx: {transfered_ustx}" ); assert!(!transfer_stx_found, "Transfer STX op should be unique"); transfer_stx_found = true; continue; } if burnchain_op.contains_key("delegate_stx") { - info!("Got delegate STX op: {:?}", burnchain_op); + info!("Got delegate STX op: {burnchain_op:?}"); let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap(); let sender_obj = delegate_stx_obj.get("sender").unwrap(); let sender = sender_obj.get("address").unwrap().as_str().unwrap(); @@ -4811,7 +4748,7 @@ fn burn_ops_integration_test() { continue; } if !burnchain_op.contains_key("stack_stx") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let stack_stx_obj = burnchain_op.get("stack_stx").unwrap(); @@ -4882,7 +4819,7 @@ fn burn_ops_integration_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -4937,17 +4874,14 @@ fn forked_tenure_is_ignored() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - 
PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5075,7 +5009,7 @@ fn forked_tenure_is_ignored() { .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -5304,13 +5238,10 @@ fn check_block_heights() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -5394,12 +5325,12 @@ fn check_block_heights() { vec![], ); let preheights = heights0_value.expect_tuple().unwrap(); - info!("Heights from pre-epoch 3.0: {}", preheights); + info!("Heights from pre-epoch 3.0: {preheights}"); wait_for_first_naka_block_commit(60, &commits_submitted); let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); // 
With the first Nakamoto block, the chain tip and the number of tenures // must be the same (before Nakamoto every block counts as a tenure) @@ -5417,7 +5348,7 @@ fn check_block_heights() { vec![], ); let heights0 = heights0_value.expect_tuple().unwrap(); - info!("Heights from epoch 3.0 start: {}", heights0); + info!("Heights from epoch 3.0 start: {heights0}"); assert_eq!( heights0.get("burn-block-height"), preheights.get("burn-block-height"), @@ -5466,7 +5397,7 @@ fn check_block_heights() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -5488,7 +5419,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: {heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5498,7 +5429,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5598,7 +5529,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: {heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5608,7 +5539,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5723,17 +5654,11 @@ fn nakamoto_attempt_time() { let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - 
naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 1_000_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1_000_000_000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100_000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5849,7 +5774,7 @@ fn nakamoto_attempt_time() { // mine the interim blocks for tenure_count in 0..inter_blocks_per_tenure { - debug!("nakamoto_attempt_time: begin tenure {}", tenure_count); + debug!("nakamoto_attempt_time: begin tenure {tenure_count}"); let blocks_processed_before = coord_channel .lock() @@ -5987,8 +5912,7 @@ fn nakamoto_attempt_time() { break 'submit_txs; } info!( - "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})", - acct_idx, tx_count, tx_limit + "nakamoto_times_ms: on account {acct_idx}; sent {tx_count} txs so far (out of {tx_limit})" ); } acct_idx += 1; @@ -6056,13 +5980,10 @@ fn clarity_burn_state() { let tx_fee = 1000; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -6139,7 +6060,7 @@ fn clarity_burn_state() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining 
tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); // Don't submit this tx on the first iteration, because the contract is not published yet. if tenure_ix > 0 { @@ -6200,7 +6121,7 @@ fn clarity_burn_state() { let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; - info!("Expecting burn block height to be {}", burn_block_height); + info!("Expecting burn block height to be {burn_block_height}"); // Assert that the contract call was successful test_observer::get_mined_nakamoto_blocks() @@ -6215,11 +6136,11 @@ fn clarity_burn_state() { return; } - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6241,7 +6162,7 @@ fn clarity_burn_state() { "foo", vec![&expected_height], ); - info!("Read-only result: {:?}", result); + info!("Read-only result: {result:?}"); result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block @@ -6277,11 +6198,11 @@ fn clarity_burn_state() { .iter() .for_each(|event| match event { TransactionEvent::Success(TransactionSuccessEvent { result, .. 
}) => { - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6307,6 +6228,7 @@ fn clarity_burn_state() { #[test] #[ignore] +#[allow(clippy::drop_non_drop)] fn signer_chainstate() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -6314,7 +6236,7 @@ fn signer_chainstate() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -6324,15 +6246,12 @@ fn signer_chainstate() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -6388,7 +6307,7 @@ fn signer_chainstate() { .unwrap() .unwrap() .stacks_block_height; - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -6639,13 +6558,13 @@ fn signer_chainstate() { // Case: the block doesn't confirm the 
prior blocks that have been signed. let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); let last_tenure_header = &last_tenure.header; - let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_sk = naka_conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: last_tenure_header.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6693,8 +6612,8 @@ fn signer_chainstate() { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), - parent_block_id: last_tenure_header.parent_block_id.clone(), + consensus_hash: last_tenure_header.consensus_hash, + parent_block_id: last_tenure_header.parent_block_id, tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), timestamp: last_tenure_header.timestamp + 1, @@ -6751,7 +6670,7 @@ fn signer_chainstate() { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6782,9 +6701,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - 
burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6812,12 +6731,12 @@ fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash; let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6848,9 +6767,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: 
stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6889,7 +6808,7 @@ fn signer_chainstate() { // every step of the return should be linked to the parent let mut prior: Option<&TenureForkingInfo> = None; for step in fork_info.iter().rev() { - if let Some(ref prior) = prior { + if let Some(prior) = prior { assert_eq!(prior.sortition_id, step.parent_sortition_id); } prior = Some(step); @@ -6928,7 +6847,7 @@ fn continue_tenure_extend() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let http_origin = naka_conf.node.data_url.clone(); @@ -6938,15 +6857,12 @@ fn continue_tenure_extend() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut transfer_nonce = 0; @@ -7005,7 +6921,7 @@ fn continue_tenure_extend() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7194,7 +7110,7 @@ fn continue_tenure_extend() { let mut has_extend = false; for tx in 
block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == &transfer_tx_hex { + if raw_tx == transfer_tx_hex { transfer_tx_included = true; continue; } @@ -7203,8 +7119,9 @@ fn continue_tenure_extend() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => match payload.cause { + + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { has_extend = true; tenure_extends.push(parsed); @@ -7215,9 +7132,8 @@ fn continue_tenure_extend() { } tenure_block_founds.push(parsed); } - }, - _ => {} - }; + }; + } } last_block_had_extend = has_extend; } @@ -7242,7 +7158,7 @@ fn continue_tenure_extend() { // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7280,8 +7196,8 @@ fn get_block_times( info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7294,8 +7210,8 @@ fn get_block_times( .unwrap(); let time_now0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-last-time", vec![], @@ -7308,8 +7224,8 @@ fn get_block_times( .unwrap(); let time1_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7322,8 +7238,8 @@ fn get_block_times( .unwrap(); let time1_now_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + 
sender_addr, contract1_name, "get-last-time", vec![], @@ -7336,8 +7252,8 @@ fn get_block_times( .unwrap(); let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-tenure-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7350,8 +7266,8 @@ fn get_block_times( .unwrap(); let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-block-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7364,8 +7280,8 @@ fn get_block_times( .unwrap(); let time3_now_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-last-tenure-time", vec![], @@ -7432,13 +7348,10 @@ fn check_block_times() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 12, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -7528,7 +7441,7 @@ fn check_block_times() { .unwrap() .expect_u128() .unwrap(); - info!("Time from pre-epoch 3.0: {}", time0); + info!("Time from pre-epoch 3.0: {time0}"); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -7833,13 +7746,10 @@ fn check_block_info() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + 
naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let contract3_name = "test-contract-3"; @@ -7971,7 +7881,7 @@ fn check_block_info() { blind_signer(&naka_conf, &signers, proposals_submitted); let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); + info!("Info from pre-epoch 3.0: {c0_block_ht_1_pre_3:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8039,7 +7949,7 @@ fn check_block_info() { // one in the tenure) let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = @@ -8062,7 +7972,7 @@ fn check_block_info() { .unwrap(); let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let cur_stacks_block_height = info.stacks_tip_height as u128; let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let cur_tenure_height: u128 = @@ -8381,7 +8291,7 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } -fn get_expected_reward_for_height(blocks: &Vec, block_height: u128) -> u128 { +fn get_expected_reward_for_height(blocks: &[serde_json::Value], block_height: u128) -> u128 { // Find the target block let target_block = blocks .iter() @@ -8468,13 +8378,10 @@ fn check_block_info_rewards() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), 
- 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -8560,7 +8467,7 @@ fn check_block_info_rewards() { blind_signer(&naka_conf, &signers, proposals_submitted); let tuple0 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", tuple0); + info!("Info from pre-epoch 3.0: {tuple0:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8672,7 +8579,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -8705,7 +8612,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); @@ -8797,7 +8704,7 @@ fn mock_mining() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 3; let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers @@ -8819,13 +8726,10 @@ fn mock_mining() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + 
naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -9009,18 +8913,22 @@ fn mock_mining() { Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) > follower_naka_mined_blocks_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); wait_for(20, || { Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -9046,9 +8954,7 @@ fn mock_mining() { let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( blocks_mock_mined >= tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. Expected = {}", - blocks_mock_mined, - tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {blocks_mock_mined}. 
Expected = {tenure_count}" ); // wait for follower to reach the chain tip @@ -9093,8 +8999,8 @@ fn utxo_check_on_startup_panic() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9169,8 +9075,8 @@ fn utxo_check_on_startup_recover() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9244,10 +9150,10 @@ fn v3_signer_api_endpoint() { let send_amt = 100; let send_fee = 180; conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); - conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events @@ -9273,7 +9179,7 @@ fn v3_signer_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -9407,7 +9313,7 @@ fn skip_mining_long_tx() { 
} let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.miner.nakamoto_attempt_time_ms = 5_000; @@ -9419,20 +9325,14 @@ fn skip_mining_long_tx() { let send_amt = 1000; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_1_addr.clone()).to_string(), + PrincipalData::from(sender_1_addr).to_string(), send_amt * 15 + send_fee * 15, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_2_addr.clone()).to_string(), - 10000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_2_addr).to_string(), 10000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -9488,7 +9388,7 @@ fn skip_mining_long_tx() { wait_for_first_naka_block_commit(60, &commits_submitted); // submit a long running TX and the transfer TX - let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); + let input_list: Vec<_> = (1..100u64).map(|x| x.to_string()).collect(); let input_list = input_list.join(" "); // Mine a few nakamoto tenures with some interim blocks in them diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 
d6373a3b44..1f7252ec5f 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -158,7 +158,7 @@ fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAdd .unwrap() .burnchain .magic_bytes; - assert_eq!(magic_bytes.as_bytes(), &['T' as u8, '2' as u8]); + assert_eq!(magic_bytes.as_bytes(), b"T2"); conf.burnchain.magic_bytes = magic_bytes; conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; @@ -391,7 +391,7 @@ pub mod test_observer { let new_rawtxs = txs .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let mut memtxs = MEMTXS.lock().unwrap(); for new_tx in new_rawtxs { @@ -408,7 +408,7 @@ pub mod test_observer { .unwrap() .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let reason = txs.get("reason").unwrap().as_str().unwrap().to_string(); @@ -622,8 +622,7 @@ pub mod test_observer { // Find indexes in range for which we don't have burn block in set let missing = (start..=end) - .into_iter() - .filter(|i| !burn_block_heights.contains(&i)) + .filter(|i| !burn_block_heights.contains(i)) .collect::>(); if missing.is_empty() { @@ -725,20 +724,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", get_epoch_time_secs(), - current_height ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let Ok(tip_info) = 
get_chain_info_result(&conf) else { + let Ok(tip_info) = get_chain_info_result(conf) else { sleep_ms(1000); continue; }; @@ -764,15 +762,12 @@ pub fn wait_for_runloop(blocks_processed: &Arc) { pub fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64) -> bool { let mut current = microblocks_processed.load(Ordering::SeqCst); let start = Instant::now(); - info!("Waiting for next microblock (current = {})", ¤t); + info!("Waiting for next microblock (current = {current})"); loop { let now = microblocks_processed.load(Ordering::SeqCst); if now == 0 && current != 0 { // wrapped around -- a new epoch started - info!( - "New microblock epoch started while waiting (originally {})", - current - ); + info!("New microblock epoch started while waiting (originally {current})"); current = 0; } @@ -781,24 +776,24 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64 } if start.elapsed() > Duration::from_secs(timeout) { - warn!("Timed out waiting for microblocks to process ({})", timeout); + warn!("Timed out waiting for microblocks to process ({timeout})"); return false; } thread::sleep(Duration::from_millis(100)); } info!("Next microblock acknowledged"); - return true; + true } /// returns Txid string upon success -pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result { +pub fn submit_tx_fallible(http_origin: &str, tx: &[u8]) -> Result { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(tx.clone()) + .body(tx.to_vec()) .send() .unwrap(); if res.status().is_success() { @@ -817,16 +812,16 @@ pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result) -> String { +pub fn submit_tx(http_origin: &str, tx: &[u8]) -> String { submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { - eprintln!("Submit tx error: {}", e); + eprintln!("Submit tx 
error: {e}"); panic!(""); }) } pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); + let path = format!("{http_origin}/v2/transactions/unconfirmed/{txid}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -840,14 +835,14 @@ pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option { pub fn submit_block( http_origin: &str, consensus_hash: &ConsensusHash, - block: &Vec, + block: &[u8], ) -> StacksBlockAcceptedData { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/upload/{}", http_origin, consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{consensus_hash}"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(block.clone()) + .body(block.to_owned()) .send() .unwrap(); @@ -862,21 +857,21 @@ pub fn submit_block( .block_hash() ) ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); } } -pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash { +pub fn submit_microblock(http_origin: &str, mblock: &[u8]) -> BlockHeaderHash { let client = reqwest::blocking::Client::new(); let microblock = StacksMicroblock::consensus_deserialize(&mut &mblock[..]).unwrap(); - let path = format!("{}/v2/microblocks/{}", http_origin, microblock.block_hash()); + let path = format!("{http_origin}/v2/microblocks/{}", microblock.block_hash()); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(mblock.clone()) + .body(mblock.to_owned()) .send() .unwrap(); @@ -888,7 +883,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash .unwrap() .block_hash() ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -897,7 +892,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> 
BlockHeaderHash pub fn get_block(http_origin: &str, block_id: &StacksBlockId) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", http_origin, block_id); + let path = format!("{http_origin}/v2/blocks/{block_id}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -939,7 +934,7 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { // get the associated anchored block let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", &http_origin, &stacks_id_tip); + let path = format!("{http_origin}/v2/blocks/{stacks_id_tip}"); let block_bytes = client.get(&path).send().unwrap().bytes().unwrap(); let block = StacksBlock::consensus_deserialize(&mut block_bytes.as_ref()).unwrap(); @@ -972,10 +967,7 @@ pub fn call_read_only( info!("Call read only: {contract}.{function}({args:?})"); - let path = format!( - "{http_origin}/v2/contracts/call-read/{}/{}/{}", - principal, contract, function - ); + let path = format!("{http_origin}/v2/contracts/call-read/{principal}/{contract}/{function}"); let serialized_args = args .iter() @@ -1005,14 +997,13 @@ fn find_microblock_privkey( let mut keychain = Keychain::default(conf.node.seed.clone()); for ix in 0..max_tries { // the first rotation occurs at 203. - let privk = - keychain.make_microblock_secret_key(203 + ix, &((203 + ix) as u64).to_be_bytes()); + let privk = keychain.make_microblock_secret_key(203 + ix, &(203 + ix).to_be_bytes()); let pubkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(&privk)); if pubkh == *pubkey_hash { return Some(privk); } } - return None; + None } /// Returns true iff `b` is within `0.1%` of `a`. 
@@ -1088,7 +1079,7 @@ fn bitcoind_integration_test() { .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) .collect(); assert!( - burn_blocks_with_burns.len() >= 1, + !burn_blocks_with_burns.is_empty(), "Burn block sortitions {} should be >= 1", burn_blocks_with_burns.len() ); @@ -1096,7 +1087,7 @@ fn bitcoind_integration_test() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1184,7 +1175,7 @@ fn confirm_unparsed_ongoing_ops() { bitcoin_regtest_controller::TEST_MAGIC_BYTES .lock() .unwrap() - .replace(['Z' as u8, 'Z' as u8]); + .replace([b'Z', b'Z']); // let's trigger another mining loop: this should create an invalid block commit. // this bitcoin block will contain the valid commit created before (so, a second stacks block) @@ -1209,7 +1200,7 @@ fn confirm_unparsed_ongoing_ops() { // query the miner's account nonce - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1305,9 +1296,9 @@ fn most_recent_utxo_integration_test() { let smallest_utxo = smallest_utxo.unwrap(); let mut biggest_utxo = biggest_utxo.unwrap(); - eprintln!("Last-spent UTXO is {:?}", &last_utxo); - eprintln!("Smallest UTXO is {:?}", &smallest_utxo); - eprintln!("Biggest UTXO is {:?}", &biggest_utxo); + eprintln!("Last-spent UTXO is {last_utxo:?}"); + eprintln!("Smallest UTXO is {smallest_utxo:?}"); + eprintln!("Biggest UTXO is {biggest_utxo:?}"); assert_eq!(last_utxo, smallest_utxo); assert_ne!(biggest_utxo, last_utxo); @@ -1354,9 +1345,9 @@ pub fn get_account_result( account: &F, ) -> Result { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account); + 
let path = format!("{http_origin}/v2/accounts/{account}?proof=0"); let res = client.get(&path).send()?.json::()?; - info!("Account response: {:#?}", res); + info!("Account response: {res:#?}"); Ok(Account { balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(), locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(), @@ -1371,19 +1362,19 @@ pub fn get_account(http_origin: &str, account: &F) -> Acco pub fn get_neighbors(conf: &Config) -> Option { let client = reqwest::blocking::Client::new(); let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{}/v2/neighbors", http_origin); + let path = format!("{http_origin}/v2/neighbors"); client.get(&path).send().ok()?.json().ok() } pub fn get_pox_info(http_origin: &str) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/pox", http_origin); + let path = format!("{http_origin}/v2/pox"); client.get(&path).send().ok()?.json::().ok() } fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1404,7 +1395,7 @@ fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { fn get_chain_tip_height(http_origin: &str) -> u64 { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1427,10 +1418,8 @@ pub fn get_contract_src( } else { "".to_string() }; - let path = format!( - "{}/v2/contracts/source/{}/{}{}", - http_origin, contract_addr, contract_name, query_string - ); + let path = + format!("{http_origin}/v2/contracts/source/{contract_addr}/{contract_name}{query_string}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -1848,7 +1837,7 @@ fn lockup_integration() { } } } - assert_eq!(found, 
true); + assert!(found); // block #2 won't unlock STX next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1882,7 +1871,7 @@ fn stx_transfer_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -1892,7 +1881,7 @@ fn stx_transfer_btc_integration_test() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -1948,7 +1937,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -1975,8 +1964,8 @@ fn stx_transfer_btc_integration_test() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -1986,7 +1975,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -2017,7 +2006,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. 
let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2046,8 +2035,8 @@ fn stx_transfer_btc_integration_test() { // let's fire off our transfer op. let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -2057,7 +2046,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -2111,7 +2100,7 @@ fn stx_delegate_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); @@ -2119,11 +2108,7 @@ fn stx_delegate_btc_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2132,7 +2117,7 @@ fn stx_delegate_btc_integration_test() { amount: 100300, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: 300, }); @@ -2226,7 +2211,7 @@ fn stx_delegate_btc_integration_test() { // okay, let's send a pre-stx op. 
let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2252,8 +2237,8 @@ fn stx_delegate_btc_integration_test() { // let's fire off our delegate op. let del_stx_op = DelegateStxOp { - sender: spender_stx_addr.clone(), - delegate_to: recipient_addr.clone(), + sender: spender_stx_addr, + delegate_to: recipient_addr, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -2264,7 +2249,7 @@ fn stx_delegate_btc_integration_test() { until_burn_height: None, }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -2298,7 +2283,7 @@ fn stx_delegate_btc_integration_test() { Value::Principal(spender_addr.clone()), Value::UInt(100_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -2372,7 +2357,7 @@ fn stack_stx_burn_op_test() { let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr_1: StacksAddress = to_addr(&spender_sk_1); - let spender_addr_1: PrincipalData = spender_stx_addr_1.clone().into(); + let spender_addr_1: PrincipalData = spender_stx_addr_1.into(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); @@ -2390,7 +2375,7 @@ fn stack_stx_burn_op_test() { amount: first_bal, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: second_bal, }); @@ -2506,8 +2491,8 @@ fn stack_stx_burn_op_test() { info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - let signer_sk_1 = spender_sk_1.clone(); - let signer_sk_2 = spender_sk_2.clone(); + let signer_sk_1 = spender_sk_1; + let signer_sk_2 = 
spender_sk_2; let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); let pox_addr = PoxAddress::Standard(spender_stx_addr_1, Some(AddressHashMode::SerializeP2PKH)); @@ -2540,7 +2525,7 @@ fn stack_stx_burn_op_test() { let mut miner_signer_2 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_2 = PreStxOp { - output: spender_stx_addr_2.clone(), + output: spender_stx_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2619,13 +2604,13 @@ fn stack_stx_burn_op_test() { // `stacked_ustx` should be large enough to avoid ERR_STACKING_THRESHOLD_NOT_MET from Clarity let stack_stx_op_with_some_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_1.clone(), + sender: spender_stx_addr_1, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, signer_key: Some(signer_key), max_amount: Some(u128::MAX), - auth_id: Some(auth_id.into()), + auth_id: Some(auth_id), // to be filled in vtxindex: 0, txid: Txid([0u8; 32]), @@ -2633,7 +2618,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); + let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); assert!( btc_regtest_controller .submit_operation( @@ -2647,7 +2632,7 @@ fn stack_stx_burn_op_test() { ); let stack_stx_op_with_no_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_2.clone(), + sender: spender_stx_addr_2, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, @@ -2661,7 +2646,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); assert!( btc_regtest_controller .submit_operation( @@ -2740,7 +2725,7 @@ fn stack_stx_burn_op_test() { for ancestor_bhh in 
ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -2775,17 +2760,13 @@ fn vote_for_aggregate_key_burn_op_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let _pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let _pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2912,7 +2893,7 @@ fn vote_for_aggregate_key_burn_op_test() { // setup stack-stx tx - let signer_sk = spender_sk.clone(); + let signer_sk = spender_sk; let signer_pk = StacksPublicKey::from_private(&signer_sk); let pox_addr = PoxAddress::Standard(spender_stx_addr, Some(AddressHashMode::SerializeP2PKH)); @@ -2959,7 +2940,7 @@ fn vote_for_aggregate_key_burn_op_test() { let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3012,7 +2993,7 @@ fn vote_for_aggregate_key_burn_op_test() { BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: spender_stx_addr.clone(), + sender: spender_stx_addr, round: 0, reward_cycle, aggregate_key, @@ -3023,7 
+3004,7 @@ fn vote_for_aggregate_key_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3251,16 +3232,16 @@ fn bitcoind_forking_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); @@ -3355,17 +3336,17 @@ fn should_fix_2771() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // okay, let's figure out the burn block we want to fork away. 
let reorg_height = 208; - warn!("Will trigger re-org at block {}", reorg_height); + warn!("Will trigger re-org at block {reorg_height}"); let burn_header_hash_to_fork = btc_regtest_controller.get_block_hash(reorg_height); btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork); btc_regtest_controller.build_next_block(1); @@ -3407,10 +3388,10 @@ fn make_signed_microblock( version: rng.gen(), sequence: seq, prev_block: parent_block, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }, - txs: txs, + txs, }; mblock.sign(block_privk).unwrap(); mblock @@ -3585,9 +3566,8 @@ fn microblock_fork_poison_integration_test() { make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); eprintln!( - "Created second conflicting microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second conflicting microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3598,7 +3578,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3616,7 +3596,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3737,7 +3717,7 @@ fn microblock_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); 
assert_eq!(account.nonce, 1); @@ -3854,9 +3834,8 @@ fn microblock_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); /* let second_microblock = @@ -3869,9 +3848,8 @@ fn microblock_integration_test() { 1, ); eprintln!( - "Created second microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3882,7 +3860,7 @@ fn microblock_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3894,7 +3872,7 @@ fn microblock_integration_test() { assert_eq!(res, format!("{}", &first_microblock.block_hash())); - eprintln!("\n\nBegin testing\nmicroblock: {:?}\n\n", &first_microblock); + eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 1); @@ -3906,7 +3884,7 @@ fn microblock_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -4090,13 +4068,11 @@ fn microblock_integration_test() { // we can query unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); - eprintln!("{:?}", &path); + eprintln!("{path:?}"); let mut iter_count = 0; let res = loop { @@ -4107,7 +4083,7 
@@ fn microblock_integration_test() { match http_resp.json::() { Ok(x) => break x, Err(e) => { - warn!("Failed to query {}; will try again. Err = {:?}", &path, e); + warn!("Failed to query {path}; will try again. Err = {e:?}"); iter_count += 1; assert!(iter_count < 10, "Retry limit reached querying account"); sleep_ms(1000); @@ -4116,17 +4092,14 @@ fn microblock_integration_test() { }; }; - info!("Account Response = {:#?}", res); + info!("Account Response = {res:#?}"); assert_eq!(res.nonce, 2); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); // limited by chaining for next_nonce in 2..5 { // verify that the microblock miner can automatically pick up transactions - debug!( - "Try to send unconfirmed tx from {} to {} nonce {}", - &spender_addr, &recipient, next_nonce - ); + debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( &spender_sk, next_nonce, @@ -4136,14 +4109,14 @@ fn microblock_integration_test() { 1000, ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(unconfirmed_tx_bytes.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -4153,7 +4126,7 @@ fn microblock_integration_test() { .txid() .to_string() ); - eprintln!("Sent {}", &res); + eprintln!("Sent {res}"); } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -4171,15 +4144,13 @@ fn microblock_integration_test() { // we can query _new_ unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); let res_text = 
client.get(&path).send().unwrap().text().unwrap(); - eprintln!("text of {}\n{}", &path, &res_text); + eprintln!("text of {path}\n{res_text}"); let res = client .get(&path) @@ -4187,8 +4158,8 @@ fn microblock_integration_test() { .unwrap() .json::() .unwrap(); - eprintln!("{:?}", &path); - eprintln!("{:#?}", res); + eprintln!("{path:?}"); + eprintln!("{res:#?}"); // advanced! assert_eq!(res.nonce, next_nonce + 1); @@ -4209,10 +4180,7 @@ fn filter_low_fee_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4232,7 +4200,7 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4242,7 +4210,7 @@ fn filter_low_fee_tx_integration_test() { } else { // high-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 2000 + (ix as u64), conf.burnchain.chain_id, @@ -4296,14 +4264,9 @@ fn filter_low_fee_tx_integration_test() { // First five accounts have a transaction. The miner will consider low fee transactions, // but rank by estimated fee rate. 
- for i in 0..5 { - let account = get_account(&http_origin, &spender_addrs[i]); - assert_eq!(account.nonce, 1); - } - - // last five accounts have transaction - for i in 5..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + // Last five accounts have transaction + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, spender_addr); assert_eq!(account.nonce, 1); } @@ -4317,10 +4280,7 @@ fn filter_long_runtime_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4341,7 +4301,7 @@ fn filter_long_runtime_tx_integration_test() { .map(|(ix, spender_sk)| { let recipient = StacksAddress::from_string(ADDR_4).unwrap(); make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4393,8 +4353,8 @@ fn filter_long_runtime_tx_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // no transactions mined - for i in 0..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 0); } @@ -4517,10 +4477,7 @@ fn size_check_integration_test() { giant_contract.push(' '); } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4652,13 +4609,10 @@ fn size_check_integration_test() { panic!("Spender address nonce incremented past 1"); } - debug!("Spender {},{}: {:?}", ix, 
&spender_addr, &res); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - eprintln!( - "anchor_block_txs: {}, micro_block_txs: {}", - anchor_block_txs, micro_block_txs - ); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); if anchor_block_txs >= 2 && micro_block_txs >= 2 { break; @@ -4693,10 +4647,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4723,7 +4674,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { i as u64, 1100000, conf.burnchain.chain_id, - &format!("small-{}", i), + &format!("small-{i}"), &small_contract, ); ret.push(tx); @@ -4849,10 +4800,10 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_per_block += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -4868,8 +4819,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}, total_big_txs_per_block: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, max_big_txs_per_block, total_big_txs_per_block, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert!(max_big_txs_per_block > 0); @@ -4902,10 +4852,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..20) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4930,15 +4877,14 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let txs: Vec<_> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 600000, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5049,7 +4995,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let 
TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5061,8 +5007,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 5); @@ -5090,10 +5035,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..25) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5115,15 +5057,14 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let txs: Vec> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 1149230, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5222,7 +5163,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5234,8 +5175,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 3); @@ -5252,13 +5192,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { return; } - let spender_sks: Vec<_> = (0..4) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let spender_addrs_c32: Vec = - spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_addrs_c32: Vec = spender_sks.iter().map(to_addr).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5292,7 +5228,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5336,7 +5272,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { (begin (crash-me \"{}\")) ", - &format!("large-contract-{}-{}", &spender_addrs_c32[ix], &ix) + &format!("large-contract-{}-{ix}", &spender_addrs_c32[ix]) ) )] } else { @@ -5347,7 +5283,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { i as u64, 210000, conf.burnchain.chain_id, - &format!("small-{}-{}", ix, i), + &format!("small-{ix}-{i}"), &format!(" ;; a single one of these transactions consumes over half the 
runtime budget (define-constant BUFF_TO_BYTE (list @@ -5390,7 +5326,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) (begin (crash-me \"{}\")) - ", &format!("small-contract-{}-{}-{}", &spender_addrs_c32[ix], &ix, i)) + ", &format!("small-contract-{}-{ix}-{i}", &spender_addrs_c32[ix])) ); ret.push(tx); } @@ -5486,7 +5422,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let mut total_big_txs_in_microblocks = 0; for block in blocks { - eprintln!("block {:?}", &block); + eprintln!("block {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); let mut num_big_anchored_txs = 0; @@ -5499,12 +5435,12 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {:?}", &parsed); + eprintln!("tx: {parsed:?}"); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_in_blocks += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_in_microblocks += 1; } @@ -5520,12 +5456,10 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } info!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}", - max_big_txs_per_microblock, max_big_txs_per_block + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}" ); info!( - "total_big_txs_in_microblocks: {}, total_big_txs_in_blocks: {}", - total_big_txs_in_microblocks, total_big_txs_in_blocks + "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}" ); // at most one big tx per block and at most one big tx per stream, always. 
@@ -5605,7 +5539,7 @@ fn block_replay_integration_test() { // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -5638,7 +5572,7 @@ fn block_replay_integration_test() { tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); for i in 0..1024 { - let path = format!("{}/v2/blocks/upload/{}", &http_origin, &tip_consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); let res_text = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -5648,7 +5582,7 @@ fn block_replay_integration_test() { .text() .unwrap(); - eprintln!("{}: text of {}\n{}", i, &path, &res_text); + eprintln!("{i}: text of {path}\n{res_text}"); } test_observer::clear(); @@ -6022,11 +5956,11 @@ fn mining_events_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); conf.initial_balances.push(InitialBalance { - address: addr_2.clone().into(), + address: addr_2.into(), amount: 10000000, }); @@ -6121,7 +6055,7 @@ fn mining_events_integration_test() { // check mined microblock events let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(mined_microblock_events.len() >= 1); + assert!(!mined_microblock_events.is_empty()); // check tx events in the first microblock // 1 success: 1 contract publish, 2 error (on chain transactions) @@ -6136,15 +6070,12 @@ fn mining_events_integration_test() { execution_cost, .. 
}) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &620000); assert_eq!( execution_cost, @@ -6176,15 +6107,12 @@ fn mining_events_integration_test() { txid.to_string(), "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" ); - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); } _ => panic!("unexpected event type"), } @@ -6197,15 +6125,12 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &600000); assert_eq!( execution_cost, @@ -6304,7 +6229,7 @@ fn block_limit_hit_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6432,8 +6357,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_third_block.len(), 3); let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_fourth_block = mined_block_events[4] .get("transactions") @@ -6443,8 +6368,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_fourth_block.len(), 3); let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); let 
txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6516,7 +6441,7 @@ fn microblock_limit_hit_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6656,10 +6581,7 @@ fn microblock_limit_hit_integration_test() { let txid_3 = submit_tx(&http_origin, &tx_3); let txid_4 = submit_tx(&http_origin, &tx_4); - eprintln!( - "transactions: {},{},{},{}", - &txid_1, &txid_2, &txid_3, &txid_4 - ); + eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}"); sleep_ms(50_000); @@ -6702,8 +6624,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_first_mblock.len(), 2); let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_second_mblock = mined_mblock_events[1] .get("transactions") @@ -6713,8 +6635,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_second_mblock.len(), 2); let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6761,7 
+6683,7 @@ fn block_large_tx_integration_test() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000, }); @@ -6837,10 +6759,7 @@ fn block_large_tx_integration_test() { let normal_txid = submit_tx(&http_origin, &tx); let huge_txid = submit_tx(&http_origin, &tx_2); - eprintln!( - "Try to mine a too-big tx. Normal = {}, TooBig = {}", - &normal_txid, &huge_txid - ); + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); eprintln!("Finished trying to mine a too-big tx"); @@ -6848,7 +6767,7 @@ fn block_large_tx_integration_test() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6898,7 +6817,7 @@ fn microblock_large_tx_integration_test_FLAKY() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); @@ -6981,7 +6900,7 @@ fn microblock_large_tx_integration_test_FLAKY() { // Check that the microblock contains the first tx. 
let microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 1); + assert!(!microblock_events.is_empty()); let microblock = microblock_events[0].clone(); let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -6994,7 +6913,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -7020,18 +6939,10 @@ fn pox_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); - let pox_2_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_2_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); let pox_2_address = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Testnet, @@ -7145,15 +7056,12 @@ fn pox_integration_test() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + 
assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); @@ -7191,7 +7099,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7205,14 +7113,14 @@ fn pox_integration_test() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // now let's mine until the next reward cycle starts ... while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -7220,16 +7128,13 @@ fn pox_integration_test() { .block_height_to_reward_cycle(sort_height) .expect("Expected to be able to get reward cycle"); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); assert_eq!(pox_info.reward_slots as 
u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7281,8 +7186,7 @@ fn pox_integration_test() { // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", - &spender_addr)); + format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); tested = true; } } @@ -7307,7 +7211,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7331,7 +7235,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7348,20 +7252,17 @@ fn pox_integration_test() { while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + 
assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7403,19 +7304,16 @@ fn pox_integration_test() { while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); @@ -7472,11 +7370,11 @@ fn pox_integration_test() { assert_eq!(recipient_slots.len(), 2); assert_eq!( - recipient_slots.get(&format!("{}", &pox_2_address)).cloned(), + recipient_slots.get(&format!("{pox_2_address}")).cloned(), Some(7u64) ); assert_eq!( - recipient_slots.get(&format!("{}", &pox_1_address)).cloned(), + recipient_slots.get(&format!("{pox_1_address}")).cloned(), Some(7u64) ); @@ -7490,7 +7388,7 @@ fn pox_integration_test() { while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + 
eprintln!("Sort height: {sort_height}"); } // get the canonical chain tip @@ -7513,7 +7411,7 @@ fn pox_integration_test() { while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); @@ -7661,7 +7559,7 @@ fn atlas_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -7677,14 +7575,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7759,14 +7657,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_2.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7810,14 +7708,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() 
.unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -7830,7 +7728,7 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Then check that the follower is correctly replicating the attachment @@ -7852,27 +7750,23 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -7944,20 +7838,16 @@ fn atlas_integration_test() { // Now wait for the node to sync the attachment let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = "facade00"; let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, 
- hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { eprintln!("Success syncing attachment - {}", res.text().unwrap()); attachments_did_sync = true; @@ -7966,7 +7856,7 @@ fn atlas_integration_test() { if timeout == 0 { panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); } - eprintln!("Attachment {} not sync'd yet", zonefile_hex); + eprintln!("Attachment {zonefile_hex} not sync'd yet"); thread::sleep(Duration::from_millis(1000)); } } @@ -7980,7 +7870,7 @@ fn atlas_integration_test() { let namespace = "passport"; for i in 1..10 { let user = StacksPrivateKey::new(); - let zonefile_hex = format!("facade0{}", i); + let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); let name = format!("johndoe{}", i); let tx = make_contract_call( @@ -8007,14 +7897,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8040,20 +7930,16 @@ fn atlas_integration_test() { for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - 
hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -8072,7 +7958,7 @@ fn atlas_integration_test() { // Ensure that we the attached sidecar was able to receive a total of 10 attachments // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(test_observer::get_attachments().len() > 0); + assert!(!test_observer::get_attachments().is_empty()); test_observer::clear(); channel.stop_chains_coordinator(); @@ -8122,8 +8008,8 @@ fn antientropy_integration_test() { // Prepare the config of the follower node let (mut conf_follower_node, _) = neon_integration_test_conf(); let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + "{bootstrap_node_public_key}@{}", + conf_bootstrap_node.node.p2p_bind ); conf_follower_node.connection_options.disable_block_download = true; conf_follower_node.node.set_bootstrap_nodes( @@ -8195,10 +8081,10 @@ fn antientropy_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); for i in 0..(target_height - 3) { - eprintln!("Mine block {}", i); + eprintln!("Mine block {i}"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Let's setup the follower now. 
@@ -8214,11 +8100,11 @@ fn antientropy_integration_test() { println!("Follower has finished"); } Ok(x) => { - println!("Follower gave a bad signal: {:?}", &x); + println!("Follower gave a bad signal: {x:?}"); panic!(); } Err(e) => { - println!("Failed to recv: {:?}", &e); + println!("Failed to recv: {e:?}"); panic!(); } }; @@ -8255,8 +8141,7 @@ fn antientropy_integration_test() { let mut sort_height = channel.get_sortitions_processed(); while sort_height < (target_height + 200) as u64 { eprintln!( - "Follower sortition is {}, target is {}", - sort_height, + "Follower sortition is {sort_height}, target is {}", target_height + 200 ); wait_for_runloop(&blocks_processed); @@ -8269,8 +8154,7 @@ fn antientropy_integration_test() { // wait for block height to reach target let mut tip_height = get_chain_tip_height(&http_origin); eprintln!( - "Follower Stacks tip height is {}, wait until {} >= {} - 3", - tip_height, tip_height, target_height + "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" ); let btc_regtest_controller = BitcoinRegtestController::with_burnchain( @@ -8285,7 +8169,7 @@ fn antientropy_integration_test() { sleep_ms(1000); tip_height = get_chain_tip_height(&http_origin); - eprintln!("Follower Stacks tip height is {}", tip_height); + eprintln!("Follower Stacks tip height is {tip_height}"); if burnchain_deadline < get_epoch_time_secs() { burnchain_deadline = get_epoch_time_secs() + 60; @@ -8304,12 +8188,13 @@ fn antientropy_integration_test() { channel.stop_chains_coordinator(); } +#[allow(clippy::too_many_arguments)] fn wait_for_mined( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, http_origin: &str, users: &[StacksPrivateKey], - account_before_nonces: &Vec, + account_before_nonces: &[u64], batch_size: usize, batches: usize, index_block_hashes: &mut Vec, @@ -8318,7 +8203,7 @@ fn wait_for_mined( let mut account_after_nonces = vec![0; batches * batch_size]; let mut all_mined = false; for 
_k in 0..10 { - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); sleep_ms(10_000); let (ch, bhh) = get_chain_tip(http_origin); @@ -8327,29 +8212,28 @@ fn wait_for_mined( if let Some(last_ibh) = index_block_hashes.last() { if *last_ibh != ibh { index_block_hashes.push(ibh); - eprintln!("Tip is now {}", &ibh); + eprintln!("Tip is now {ibh}"); } } for j in 0..batches * batch_size { - let account_after = get_account(&http_origin, &to_addr(&users[j])); + let account_after = get_account(http_origin, &to_addr(&users[j])); let account_after_nonce = account_after.nonce; account_after_nonces[j] = account_after_nonce; - if account_before_nonces[j] + 1 <= account_after_nonce { + if account_before_nonces[j] < account_after_nonce { all_mined_vec[j] = true; } } - all_mined = all_mined_vec.iter().fold(true, |acc, elem| acc && *elem); + all_mined = all_mined_vec.iter().all(|elem| *elem); if all_mined { break; } } if !all_mined { eprintln!( - "Failed to mine all transactions: nonces = {:?}, expected {:?} + {}", - &account_after_nonces, account_before_nonces, batch_size + "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}" ); panic!(); } @@ -8450,7 +8334,7 @@ fn atlas_stress_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -8466,14 +8350,14 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); 
if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -8548,7 +8432,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8626,14 +8510,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8660,8 +8544,7 @@ fn atlas_stress_integration_test() { } if !all_mined { eprintln!( - "Failed to mine all transactions: nonce = {}, expected {}", - account_after_nonce, + "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", account_before.nonce + (batch_size as u64) ); panic!(); @@ -8682,14 +8565,14 @@ fn atlas_stress_integration_test() { &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_4.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8723,7 +8606,7 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let fqn = format!("janedoe{}.passport", j); + let fqn = format!("janedoe{j}.passport"); let fqn_bytes = fqn.as_bytes().to_vec(); let salt = format!("{:04x}", j); let salt_bytes = salt.as_bytes().to_vec(); @@ 
-8746,7 +8629,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8755,9 +8638,8 @@ fn atlas_stress_integration_test() { .unwrap(); eprintln!( - "sent preorder for {}:\n{:#?}", - &to_addr(&users[batches * batch_size + j]), - res + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) ); if !res.status().is_success() { panic!(""); @@ -8784,10 +8666,10 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let salt = format!("{:04x}", j); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - let zonefile_hex = format!("facade01{:04x}", j); + let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8816,14 +8698,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8850,8 +8732,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade02{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); 
all_zonefiles.push(zonefile_hex.clone()); @@ -8879,14 +8761,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8913,8 +8795,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade03{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8945,14 +8827,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8984,8 +8866,8 @@ fn atlas_stress_integration_test() { &[ibh], ) .unwrap(); - if indexes.len() > 0 { - attachment_indexes.insert(ibh.clone(), indexes.clone()); + if !indexes.is_empty() { + attachment_indexes.insert(*ibh, indexes.clone()); } for index in indexes.iter() { @@ -8995,14 +8877,14 @@ fn atlas_stress_integration_test() { params![ibh, u64_to_sql(*index).unwrap()], "content_hash") .unwrap(); - if hashes.len() > 0 { + if !hashes.is_empty() { assert_eq!(hashes.len(), 1); - attachment_hashes.insert((ibh.clone(), *index), hashes.pop()); + 
attachment_hashes.insert((*ibh, *index), hashes.pop()); } } } } - eprintln!("attachment_indexes = {:?}", &attachment_indexes); + eprintln!("attachment_indexes = {attachment_indexes:?}"); let max_request_time_ms = 100; @@ -9017,12 +8899,10 @@ fn atlas_stress_integration_test() { ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)] .to_vec(); let path = format!( - "{}/v2/attachments/inv?index_block_hash={}&pages_indexes={}", - &http_origin, - ibh, + "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}", attachments_batch .iter() - .map(|a| format!("{}", &a)) + .map(|a| format!("{a}")) .collect::>() .join(",") ); @@ -9034,40 +8914,34 @@ fn atlas_stress_integration_test() { if res.status().is_success() { let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap(); - eprintln!( - "attachment inv response for {}: {:?}", - &path, &attachment_inv_response - ); + eprintln!("attachment inv response for {path}: {attachment_inv_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } - for i in 0..l { - if attachments[i] == 0 { + for attachment in attachments.iter().take(l) { + if *attachment == 0 { continue; } let content_hash = attachment_hashes - .get(&(*ibh, attachments[i])) + .get(&(*ibh, *attachment)) .cloned() .unwrap() .unwrap(); - let path = 
format!("{}/v2/attachments/{}", &http_origin, &content_hash); + let path = format!("{http_origin}/v2/attachments/{content_hash}"); let attempts = 10; let ts_begin = get_epoch_time_ms(); @@ -9076,26 +8950,20 @@ fn atlas_stress_integration_test() { if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!( - "attachment response for {}: {:?}", - &path, &attachment_response - ); + eprintln!("attachment response for {path}: {attachment_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } } @@ -9129,8 +8997,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value (unwrap! (increment) (err u1)) (unwrap! 
(increment) (err u1)) (ok (var-get counter)))) - "# - .to_string(); + "#; let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); @@ -9144,7 +9011,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value conf.estimation.fee_rate_window_size = window_size; conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000000, }); test_observer::spawn(); @@ -9181,7 +9048,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value 110000, conf.burnchain.chain_id, "increment-contract", - &max_contract_src, + max_contract_src, ), ); run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); @@ -9198,7 +9065,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value i, // nonce i * 100000, // payment conf.burnchain.chain_id, - &spender_addr.into(), + &spender_addr, "increment-contract", "increment-many", &[], @@ -9213,12 +9080,12 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value { // Read from the fee estimation endpoin. 
- let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: spender_addr.clone().into(), - contract_name: ContractName::try_from("increment-contract").unwrap(), - function_name: ClarityName::try_from("increment-many").unwrap(), + address: spender_addr, + contract_name: ContractName::from("increment-contract"), + function_name: ClarityName::from("increment-many"), function_args: vec![], }); @@ -9255,8 +9122,8 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value let last_cost = response_estimated_costs[i - 1]; assert_eq!(curr_cost, last_cost); - let curr_rate = response_top_fee_rates[i] as f64; - let last_rate = response_top_fee_rates[i - 1] as f64; + let curr_rate = response_top_fee_rates[i]; + let last_rate = response_top_fee_rates[i - 1]; assert!(curr_rate >= last_rate); } @@ -9438,7 +9305,7 @@ fn use_latest_tip_integration_test() { let client = reqwest::blocking::Client::new(); // Post the microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -9452,7 +9319,7 @@ fn use_latest_tip_integration_test() { // Wait for the microblock to be accepted sleep_ms(5_000); - let path = format!("{}/v2/info", &http_origin); + let path = format!("{http_origin}/v2/info"); let mut iter_count = 0; loop { let tip_info = client @@ -9594,26 +9461,26 @@ fn test_flash_block_skip_tenure() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // fault injection: force tenures to take too long - std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string()); + std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); for i in 0..10 { // build one bitcoin block every 10 seconds - eprintln!("Build bitcoin block +{}", i); + 
eprintln!("Build bitcoin block +{i}"); btc_regtest_controller.build_next_block(1); sleep_ms(10000); } // at least one tenure was skipped let num_skipped = missed_tenures.load(Ordering::SeqCst); - eprintln!("Skipped {} tenures", &num_skipped); + eprintln!("Skipped {num_skipped} tenures"); assert!(num_skipped > 1); // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); - eprintln!("account = {:?}", &account); + eprintln!("account = {account:?}"); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 2); @@ -9696,15 +9563,15 @@ fn test_problematic_txs_are_not_stored() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: spender_addr_1.clone(), + address: spender_addr_1, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_2.clone(), + address: spender_addr_2, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_3.clone(), + address: spender_addr_3, amount: 1_000_000_000_000, }); @@ -9772,7 +9639,7 @@ fn test_problematic_txs_are_not_stored() { let exceeds_repeat_factor = edge_repeat_factor + 1; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -9790,7 +9657,7 @@ fn test_problematic_txs_are_not_stored() { let high_repeat_factor = 128 * 1024; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let 
tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -9840,25 +9707,24 @@ fn test_problematic_txs_are_not_stored() { fn find_new_files(dirp: &str, prev_files: &HashSet) -> (Vec, HashSet) { let dirpp = Path::new(dirp); - debug!("readdir {}", dirp); + debug!("readdir {dirp}"); let cur_files = fs::read_dir(dirp).unwrap(); let mut new_files = vec![]; let mut cur_files_set = HashSet::new(); for cur_file in cur_files.into_iter() { let cur_file = cur_file.unwrap(); let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string(); - test_debug!("file in {}: {}", dirp, &cur_file_fullpath); + test_debug!("file in {dirp}: {cur_file_fullpath}"); cur_files_set.insert(cur_file_fullpath.clone()); if prev_files.contains(&cur_file_fullpath) { - test_debug!("already contains {}", &cur_file_fullpath); + test_debug!("already contains {cur_file_fullpath}"); continue; } - test_debug!("new file {}", &cur_file_fullpath); + test_debug!("new file {cur_file_fullpath}"); new_files.push(cur_file_fullpath); } debug!( - "Checked {} for new files; found {} (all: {})", - dirp, + "Checked {dirp} for new files; found {} (all: {})", new_files.len(), cur_files_set.len() ); @@ -9894,8 +9760,7 @@ fn spawn_follower_node( conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); - conf.burnchain.ast_precheck_size_height = - initial_conf.burnchain.ast_precheck_size_height.clone(); + conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; conf.connection_options.inv_sync_interval = 3; @@ -9923,12 +9788,12 @@ fn test_problematic_blocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - 
fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10000,7 +9865,7 @@ fn test_problematic_blocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10018,7 +9883,7 @@ fn test_problematic_blocks_are_not_mined() { let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10054,20 +9919,11 @@ fn test_problematic_blocks_are_not_mined() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10116,7 +9972,7 @@ fn test_problematic_blocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10125,31 +9981,25 @@ fn test_problematic_blocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); 
btc_regtest_controller.build_next_block(1); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10162,7 +10012,7 @@ fn test_problematic_blocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10185,10 +10035,8 @@ fn test_problematic_blocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no block contained the tx_high bad transaction, ever let blocks = test_observer::get_blocks(); @@ -10233,8 +10081,7 @@ fn test_problematic_blocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download 
passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10278,12 +10125,12 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10355,7 +10202,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10372,7 +10219,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10408,20 +10255,11 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10470,7 +10308,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10484,14 +10322,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { loop { sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), 
tip.block_height).unwrap(); cur_ast_rules @@ -10510,7 +10348,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10521,23 +10359,17 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10549,7 +10381,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ 
-10616,10 +10448,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { break; } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); sleep_ms(1000); } @@ -10627,8 +10457,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10669,12 +10498,12 @@ fn test_problematic_microblocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10752,7 +10581,7 @@ fn test_problematic_microblocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 
{tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -10767,11 +10596,10 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -10807,24 +10635,12 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10876,7 +10692,7 @@ fn test_problematic_microblocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = 
btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10885,39 +10701,27 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_high transaction {}", - &tx_high_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!( - "Mined block after submitting problematic tx_high transaction {}", - &tx_high_txid - ); + info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10930,7 +10734,7 @@ fn test_problematic_microblocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some microblocks, and log problematic microblocks for _i in 0..6 { @@ -10956,10 +10760,8 @@ fn test_problematic_microblocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no microblock contained the tx_high bad transaction, ever let microblocks = test_observer::get_microblocks(); @@ -11004,8 +10806,7 @@ fn test_problematic_microblocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11049,12 +10850,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - 
fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -11134,7 +10935,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -11149,11 +10950,10 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .txid(); // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -11189,20 +10989,11 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -11254,7 +11045,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -11267,14 +11058,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let 
cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11293,7 +11084,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11304,24 +11095,18 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic microblocks for _i in 0..6 { @@ -11333,7 +11118,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = 
SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11356,7 +11141,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high - assert!(all_new_files.len() >= 1); + assert!(!all_new_files.is_empty()); // tx_high got mined by the miner let microblocks = test_observer::get_microblocks(); @@ -11381,8 +11166,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .split("0x") .collect(); let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{}'", &bad_block_id_hex); - Some(StacksBlockId::from_hex(&bad_block_id_hex).unwrap()) + debug!("bad_block_id_hex = '{bad_block_id_hex}'"); + Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) }; } } @@ -11420,8 +11205,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11570,9 +11354,8 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st let full_iters_code = full_iters_code_parts.join("\n "); - let iters_mod_code_parts: Vec = (0..iters_mod) - .map(|cnt| format!("0x{:0>2x}", cnt)) - .collect(); + let iters_mod_code_parts: Vec = + (0..iters_mod).map(|cnt| format!("0x{cnt:0>2x}")).collect(); let iters_mod_code = format!("(list {})", iters_mod_code_parts.join(" ")); @@ -11599,7 +11382,7 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st (define-private (crash-me-folder (input (buff 1)) (ctr uint)) (begin ;; full_iters_code - {} + 
{full_iters_code} (+ u1 ctr) ) ) @@ -11608,20 +11391,17 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st ;; call index-of (iters_256 * 256) times (fold crash-me-folder BUFF_TO_BYTE u0) ;; call index-of iters_mod times - (fold crash-me-folder {} u0) + (fold crash-me-folder {iters_mod_code} u0) (print name) (ok u0) ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-{nonce}-{addr_prefix}-{num_index_of}\")) ", - full_iters_code, - iters_mod_code, - &format!("large-{}-{}-{}", nonce, &addr_prefix, num_index_of) ); - eprintln!("{}", &code); + eprintln!("{code}"); code } @@ -11636,13 +11416,14 @@ pub fn make_expensive_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; + let num_index_of = 256; for nonce in 0..25 { let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, 256); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{num_index_of}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11650,7 +11431,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) } else { make_contract_publish( @@ -11659,7 +11440,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) }; chain.push(tx); @@ -11673,7 +11454,7 @@ pub fn make_random_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11689,8 +11470,8 @@ pub fn 
make_random_tx_chain( let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11716,7 +11497,7 @@ pub fn make_random_tx_chain( } fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11732,7 +11513,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{}-{}-{}", nonce, &addr_prefix, random_iters); + let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); eprintln!("Make tx {}", &contract_name); let tx = make_contract_publish_microblock_only( privk, @@ -11758,10 +11539,7 @@ fn test_competing_miners_build_on_same_chain( return; } - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11808,9 +11586,8 @@ fn test_competing_miners_build_on_same_chain( confs[i].node.set_bootstrap_nodes( format!( - "{}@{}", + "{}@{p2p_bind}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind ), chain_id, peer_version, @@ -11818,8 +11595,8 @@ fn test_competing_miners_build_on_same_chain( } // use long reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 100; let prepare_phase_len = 
20; let pox_constants = PoxConstants::new( @@ -11856,10 +11633,10 @@ fn test_competing_miners_build_on_same_chain( btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -11879,8 +11656,8 @@ fn test_competing_miners_build_on_same_chain( let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -11888,7 +11665,7 @@ fn test_competing_miners_build_on_same_chain( loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 1: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 1: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -11898,23 +11675,19 @@ fn test_competing_miners_build_on_same_chain( next_block_and_wait(&mut btc_regtest_controller, &blocks_processed[0]); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - 
&mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -11938,7 +11711,7 @@ fn test_competing_miners_build_on_same_chain( let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -11948,7 +11721,7 @@ fn test_competing_miners_build_on_same_chain( // mine quickly -- see if we can induce flash blocks for i in 0..1000 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); } @@ -12023,10 +11796,7 @@ fn microblock_miner_multiple_attempts() { conf.burnchain.max_rbf = 1000000; conf.node.wait_time_for_blocks = 1_000; - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -12076,7 +11846,7 @@ fn microblock_miner_multiple_attempts() { // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {:?}", &account); + eprintln!("Miner account: {account:?}"); let all_txs: Vec<_> = privks .iter() @@ -12085,10 +11855,9 @@ fn microblock_miner_multiple_attempts() { .collect(); let _handle = thread::spawn(move || { - for txi in 0..all_txs.len() { - for j in 0..all_txs[txi].len() { - let tx = &all_txs[txi][j]; - eprintln!("\n\nSubmit tx {},{}\n\n", txi, j); + for (i, txi) in all_txs.iter().enumerate() { + for (j, tx) in txi.iter().enumerate() { + eprintln!("\n\nSubmit tx {i},{j}\n\n"); submit_tx(&http_origin, tx); sleep_ms(1_000); } @@ -12119,12 +11888,13 @@ fn min_txs() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.min_txs.json"; conf.miner.min_tx_count = 
4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.min_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); - if fs::metadata("/tmp/activate_vrf_key.min_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.min_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12176,18 +11946,18 @@ fn min_txs() { let _sort_height = channel.get_sortitions_processed(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); submit_tx(&http_origin, &publish); - debug!("Try to build too-small a block {}", &i); + debug!("Try to build too-small a block {i}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); } @@ -12195,12 +11965,12 @@ fn min_txs() { for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); if transactions.len() > 1 { - debug!("Got block: {:?}", &block); + debug!("Got block: {block:?}"); assert!(transactions.len() >= 4); } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.min_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12222,13 +11992,14 @@ fn filter_txs_by_type() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.filter_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.filter_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); conf.miner.txs_to_consider = 
[MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); - if fs::metadata("/tmp/activate_vrf_key.filter_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.filter_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12280,13 +12051,13 @@ fn filter_txs_by_type() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12298,7 +12069,7 @@ fn filter_txs_by_type() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12313,7 +12084,7 @@ fn filter_txs_by_type() { } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.filter_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12391,13 +12162,13 @@ fn filter_txs_by_origin() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12409,7 
+12180,7 @@ fn filter_txs_by_origin() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12479,12 +12250,12 @@ fn bitcoin_reorg_flap() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // stop bitcoind and copy its DB to simulate a chain flap @@ -12496,7 +12267,7 @@ fn bitcoin_reorg_flap() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -12681,8 +12452,7 @@ fn bitcoin_reorg_flap_with_follower() { let mut miner_sort_height = miner_channel.get_sortitions_processed(); let mut follower_sort_height = follower_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); while miner_sort_height < 210 && follower_sort_height < 210 { @@ -12695,8 +12465,7 @@ fn bitcoin_reorg_flap_with_follower() { miner_sort_height = miner_channel.get_sortitions_processed(); follower_sort_height = miner_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort 
height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); } @@ -12709,7 +12478,7 @@ fn bitcoin_reorg_flap_with_follower() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42b894398d..622e31bdd6 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -123,10 +123,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest (), - G: FnMut(&mut NeonConfig) -> (), - >( + fn new_with_config_modifications( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, mut signer_config_modifier: F, @@ -151,8 +148,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); @@ -330,10 +326,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( +fn setup_stx_btc_node( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], signer_configs: &[SignerConfig], diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684a..a704d2f2ee 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -118,13 +118,13 @@ impl SignerTest { for stacker_sk in 
self.signer_stacks_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -137,7 +137,7 @@ impl SignerTest { let signer_pk = StacksPublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, self.running_nodes.conf.burnchain.chain_id, @@ -246,7 +246,7 @@ impl SignerTest { .get_reward_set_signers(reward_cycle) .expect("Failed to check if reward set is calculated") .map(|reward_set| { - debug!("Signer set: {:?}", reward_set); + debug!("Signer set: {reward_set:?}"); }) .is_some()) }) @@ -304,10 +304,7 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. 
assert!(signature.len() >= num_signers * 7 / 10); - info!( - "Verifying signatures against signers for reward cycle {:?}", - reward_cycle - ); + info!("Verifying signatures against signers for reward cycle {reward_cycle:?}"); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -789,7 +786,7 @@ fn reloads_signer_set_in() { let send_fee = 180; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_config| {}, |_| {}, None, @@ -848,7 +845,7 @@ fn reloads_signer_set_in() { } }; if let Some(ref set) = reward_set { - info!("Signer set: {:?}", set); + info!("Signer set: {set:?}"); } Ok(reward_set.is_some()) }) @@ -912,7 +909,7 @@ fn forked_tenure_testing( let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -1030,7 +1027,7 @@ fn forked_tenure_testing( .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -1038,14 +1035,14 @@ fn forked_tenure_testing( let tip_b = StacksHeaderInfo { anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, - stacks_block_height: tip_b_block.header.chain_length.into(), + stacks_block_height: tip_b_block.header.chain_length, index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed - consensus_hash: tip_b_block.header.consensus_hash.clone(), - burn_header_hash: tip_sn.burn_header_hash.clone(), + consensus_hash: 
tip_b_block.header.consensus_hash, + burn_header_hash: tip_sn.burn_header_hash, burn_header_height: tip_sn.block_height as u32, burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, - burn_view: Some(tip_b_block.header.consensus_hash.clone()), + burn_view: Some(tip_b_block.header.consensus_hash), }; let blocks = test_observer::get_mined_nakamoto_blocks(); @@ -1227,10 +1224,8 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -1339,7 +1334,7 @@ fn bitcoind_forking_test() { let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; - assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 2); for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); @@ -1466,7 +1461,7 @@ fn multiple_miners() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1504,7 +1499,7 @@ fn multiple_miners() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1583,10 +1578,7 @@ fn multiple_miners() { let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); - info!( - "Issue next block-build request\ninfo 
1: {:?}\ninfo 2: {:?}\n", - &info_1, &info_2 - ); + info!("Issue next block-build request\ninfo 1: {info_1:?}\ninfo 2: {info_2:?}\n"); signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], @@ -1597,10 +1589,8 @@ fn multiple_miners() { btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure - let consensus_hash_set: HashSet<_> = blocks - .iter() - .map(|header| header.consensus_hash.clone()) - .collect(); + let consensus_hash_set: HashSet<_> = + blocks.iter().map(|header| header.consensus_hash).collect(); assert_eq!( consensus_hash_set.len(), blocks.len(), @@ -1667,14 +1657,7 @@ fn get_nakamoto_headers(config: &Config) -> Vec { let nakamoto_block_ids: HashSet<_> = test_observer::get_blocks() .into_iter() .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } + block_json.as_object().unwrap().get("miner_signature")?; let block_id = StacksBlockId::from_hex( &block_json .as_object() @@ -1753,7 +1736,7 @@ fn miner_forking() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1795,7 +1778,7 @@ fn miner_forking() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1816,7 +1799,7 @@ fn miner_forking() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( 
format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1904,7 +1887,7 @@ fn miner_forking() { TEST_BROADCAST_STALL.lock().unwrap().replace(false); // wait for a block to be processed (or timeout!) - if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { + if wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err() { info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); return (sort_tip, false); } @@ -1946,7 +1929,7 @@ fn miner_forking() { .into_iter() .map(|header| { info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); - (header.consensus_hash.clone(), header) + (header.consensus_hash, header) }) .collect(); @@ -1985,9 +1968,7 @@ fn miner_forking() { expects_miner_2_to_be_valid = false; } else { info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); - assert!(nakamoto_headers - .get(&sortition_data.consensus_hash) - .is_none()); + assert!(!nakamoto_headers.contains_key(&sortition_data.consensus_hash)); assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); won_by_miner_2_but_no_tenure = true; expects_miner_2_to_be_valid = true; @@ -2034,10 +2015,8 @@ fn end_of_tenure() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); @@ -2185,10 +2164,8 @@ fn retry_on_rejection() { let send_fee = 180; let short_timeout = Duration::from_secs(30); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2198,7 +2175,7 @@ fn retry_on_rejection() { let sortdb = burnchain.open_sortition_db(true).unwrap(); wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(tip.sortition) }) .expect("Timed out waiting for sortition"); @@ -2324,10 +2301,8 @@ fn signers_broadcast_signed_blocks() { let send_amt = 100; let send_fee = 180; let recipient = 
PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2345,8 +2320,8 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + "blocks_mined: {blocks_mined},{blocks_before}, stacks_tip_height: {},{}", + info.stacks_tip_height, info_before.stacks_tip_height ); Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) }) @@ -2388,11 +2363,7 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", - blocks_mined, - blocks_before, - signer_pushed, - signer_pushed_before, + "blocks_mined: {blocks_mined},{blocks_before}, signers_pushed: {signer_pushed},{signer_pushed_before}, stacks_tip_height: {},{}", info.stacks_tip_height, info_before.stacks_tip_height ); @@ -2432,7 +2403,7 @@ fn empty_sortition() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2613,7 +2584,7 @@ fn mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - 
vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_| {}, |node_config| { node_config.miner.pre_nakamoto_mock_signing = true; @@ -2763,7 +2734,7 @@ fn multiple_miners_mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -2807,16 +2778,16 @@ fn multiple_miners_mock_sign_epoch_25() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -2829,7 +2800,7 @@ fn multiple_miners_mock_sign_epoch_25() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( 
format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -2953,17 +2924,13 @@ fn signer_set_rollover() { let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .into_iter() .map(|_| StacksPrivateKey::new()) .collect(); let new_signer_public_keys: Vec<_> = new_signer_private_keys .iter() .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); - let new_signer_addresses: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| tests::to_addr(sk)) - .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; @@ -2972,15 +2939,15 @@ fn signer_set_rollover() { let mut initial_balances = new_signer_addresses .iter() - .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) .collect::>(); - initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); let run_stamp = rand::random(); let rpc_port = 51024; - let rpc_bind = format!("127.0.0.1:{}", rpc_port); + let rpc_bind = format!("127.0.0.1:{rpc_port}"); // Setup the new signers that will take over let new_signer_configs = build_signer_config_tomls( @@ -2997,12 +2964,11 @@ fn signer_set_rollover() { None, ); - let new_spawned_signers: Vec<_> = (0..new_num_signers) - .into_iter() - .map(|i| { + let new_spawned_signers: Vec<_> = new_signer_configs + .iter() + .map(|conf| { info!("spawning signer"); - let signer_config = - SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); + let signer_config = SignerConfig::load_from_str(conf).unwrap(); SpawnedSigner::new(signer_config) }) .collect(); @@ -3047,7 +3013,7 @@ fn signer_set_rollover() { // Verify that naka_conf has our new signer's event observers for toml in &new_signer_configs { - let signer_config = 
SignerConfig::load_from_str(&toml).unwrap(); + let signer_config = SignerConfig::load_from_str(toml).unwrap(); let endpoint = format!("{}", signer_config.endpoint); assert!(signer_test .running_nodes @@ -3072,7 +3038,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the old signers ----"); let current_signers = signer_test.get_reward_set_signers(reward_cycle); - assert_eq!(current_signers.len(), num_signers as usize); + assert_eq!(current_signers.len(), num_signers); // Verify that the current signers are the same as the old signers for signer in current_signers.iter() { assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); @@ -3117,13 +3083,13 @@ fn signer_set_rollover() { for stacker_sk in new_signer_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -3136,7 +3102,7 @@ fn signer_set_rollover() { let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, signer_test.running_nodes.conf.burnchain.chain_id, @@ -3190,10 +3156,7 @@ fn signer_set_rollover() { assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!( - "---- Mining to the next reward cycle (block {}) -----", - next_cycle_height - ); + info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, @@ -3204,7 +3167,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the new signers ----"); let current_signers = 
signer_test.get_reward_set_signers(new_reward_cycle); - assert_eq!(current_signers.len(), new_num_signers as usize); + assert_eq!(current_signers.len(), new_num_signers); for signer in current_signers.iter() { assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); @@ -3262,13 +3225,12 @@ fn min_gap_between_blocks() { let send_amt = 100; let send_fee = 180; - let mut sender_nonce = 0; let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * interim_blocks)], + vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -3294,13 +3256,12 @@ fn min_gap_between_blocks() { // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, - sender_nonce, + interim_block_ix, // same as the sender nonce send_fee, signer_test.running_nodes.conf.burnchain.chain_id, &recipient, send_amt, ); - sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block to be processed"); @@ -3312,7 +3273,7 @@ fn min_gap_between_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!("Mined interim block:{}", interim_block_ix); + info!("Mined interim block:{interim_block_ix}"); } wait_for(60, || { @@ -3426,7 +3387,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {:?}", &m); + info!("Message(accepted): {m:?}"); Some(m) } _ => { @@ -3503,7 +3464,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( 
num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3542,7 +3503,7 @@ fn multiple_miners_with_nakamoto_blocks() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3667,10 +3628,7 @@ fn multiple_miners_with_nakamoto_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -3681,7 +3639,7 @@ fn multiple_miners_with_nakamoto_blocks() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -3703,10 +3661,7 @@ fn multiple_miners_with_nakamoto_blocks() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -3724,10 +3679,7 @@ fn multiple_miners_with_nakamoto_blocks() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -3777,7 +3729,7 @@ fn partial_tenure_fork() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, 
)], |signer_config| { @@ -3812,7 +3764,7 @@ fn partial_tenure_fork() { panic!("Expected epochs to be set"); } }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3833,7 +3785,7 @@ fn partial_tenure_fork() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -4057,14 +4009,11 @@ fn partial_tenure_fork() { blocks = interim_block_ix; break; } else { - panic!("Failed to submit tx: {}", e); + panic!("Failed to submit tx: {e}"); } } } - info!( - "Attempted to mine interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); } if miner == 1 { @@ -4084,13 +4033,11 @@ fn partial_tenure_fork() { if miner == 1 { assert_eq!(mined_1, mined_before_1 + blocks + 1); + } else if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + blocks + 1); } else { - if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + blocks + 1); - } else { - // Miner 2 should have mined 0 blocks after the fork - assert_eq!(mined_2, mined_before_2); - } + // Miner 2 should have mined 0 blocks after the fork + assert_eq!(mined_2, mined_before_2); } } @@ -4110,10 +4057,7 @@ fn partial_tenure_fork() { // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 // before the fork was initiated assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); - assert_eq!( - btc_blocks_mined, - 
u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); let sortdb = SortitionDB::open( &conf_node_2.get_burn_db_file_path(), @@ -4179,7 +4123,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let all_signers: Vec<_> = signer_test @@ -4366,7 +4310,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let all_signers: Vec<_> = signer_test @@ -4589,7 +4533,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let all_signers = signer_test .signer_stacks_private_keys @@ -4803,7 +4747,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -4817,7 +4761,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let sortdb = burnchain.open_sortition_db(true).unwrap(); wait_for(30, || { - let tip = 
SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(tip.sortition) }) .expect("Timed out waiting for sortition"); @@ -5103,10 +5047,8 @@ fn continue_after_tenure_extend() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 5)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]); let timeout = Duration::from_secs(200); let coord_channel = signer_test.running_nodes.coord_channel.clone(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); @@ -5178,17 +5120,16 @@ fn continue_after_tenure_extend() { match &parsed.payload { TransactionPayload::TenureChange(payload) if payload.cause == TenureChangeCause::Extended => {} - _ => panic!("Expected tenure extend transaction, got {:?}", parsed), + _ => panic!("Expected tenure extend transaction, got {parsed:?}"), }; // Verify that the miner can continue mining in the tenure with the tenure extend info!("------------------------- Mine After Tenure Extend -------------------------"); - let mut sender_nonce = 0; let mut blocks_processed_before = coord_channel .lock() .expect("Mutex poisoned") .get_stacks_blocks_processed(); - for _ in 0..5 { + for sender_nonce in 0..5 { // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, @@ -5198,7 +5139,6 @@ fn continue_after_tenure_extend() { &recipient, send_amt, ); - sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block proposal"); @@ -5280,13 +5220,13 @@ fn signing_in_0th_tenure_of_reward_cycle() { assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); for signer in &signer_public_keys { 
- let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + let blocks_signed = get_v3_signer(signer, next_reward_cycle); assert_eq!(blocks_signed, 0); } info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); for signer in &signer_public_keys { - let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + let blocks_signed = get_v3_signer(signer, next_reward_cycle); assert_eq!(blocks_signed, 0); } let blocks_before = signer_test @@ -5320,7 +5260,7 @@ fn signing_in_0th_tenure_of_reward_cycle() { .unwrap() }) .expect("Unknown signer signature"); - let blocks_signed = get_v3_signer(&signer, next_reward_cycle); + let blocks_signed = get_v3_signer(signer, next_reward_cycle); assert_eq!(blocks_signed, 1); } assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); @@ -5363,7 +5303,7 @@ fn multiple_miners_with_custom_chain_id() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -5404,7 +5344,7 @@ fn multiple_miners_with_custom_chain_id() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -5428,7 +5368,7 @@ fn multiple_miners_with_custom_chain_id() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -5530,10 +5470,7 @@ fn multiple_miners_with_custom_chain_id() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); 
- info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -5544,7 +5481,7 @@ fn multiple_miners_with_custom_chain_id() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -5566,10 +5503,7 @@ fn multiple_miners_with_custom_chain_id() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -5587,10 +5521,7 @@ fn multiple_miners_with_custom_chain_id() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); // Verify both nodes have the correct chain id let miner1_info = get_chain_info(&signer_test.running_nodes.conf); diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index aa620d349b..70d820fbb1 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -41,14 +41,13 @@ fn post_stackerdb_chunk( slot_version: u32, ) -> StackerDBChunkAckData { let mut chunk = StackerDBChunkData::new(slot_id, slot_version, data); - chunk.sign(&signer).unwrap(); + chunk.sign(signer).unwrap(); let chunk_body = serde_json::to_string(&chunk).unwrap(); let client = reqwest::blocking::Client::new(); let path = format!( - "{}/v2/stackerdb/{}/{}/chunks", - http_origin, + "{http_origin}/v2/stackerdb/{}/{}/chunks", &StacksAddress::from(stackerdb_contract_id.issuer.clone()), 
stackerdb_contract_id.name ); @@ -60,8 +59,8 @@ fn post_stackerdb_chunk( .unwrap(); if res.status().is_success() { let ack: StackerDBChunkAckData = res.json().unwrap(); - info!("Got stackerdb ack: {:?}", &ack); - return ack; + info!("Got stackerdb ack: {ack:?}"); + ack } else { eprintln!("StackerDB post error: {}", res.text().unwrap()); panic!(""); @@ -76,20 +75,15 @@ fn get_stackerdb_chunk( ) -> Vec { let path = if let Some(version) = slot_version { format!( - "{}/v2/stackerdb/{}/{}/{}/{}", - http_origin, + "{http_origin}/v2/stackerdb/{}/{}/{slot_id}/{version}", StacksAddress::from(stackerdb_contract_id.issuer.clone()), stackerdb_contract_id.name, - slot_id, - version ) } else { format!( - "{}/v2/stackerdb/{}/{}/{}", - http_origin, + "{http_origin}/v2/stackerdb/{}/{}/{slot_id}", StacksAddress::from(stackerdb_contract_id.issuer.clone()), - stackerdb_contract_id.name, - slot_id + stackerdb_contract_id.name ) }; @@ -97,8 +91,7 @@ fn get_stackerdb_chunk( let res = client.get(&path).send().unwrap(); if res.status().is_success() { - let chunk_data: Vec = res.bytes().unwrap().to_vec(); - return chunk_data; + res.bytes().unwrap().to_vec() } else { eprintln!("Get chunk error: {}", res.text().unwrap()); panic!(""); @@ -115,7 +108,7 @@ fn test_stackerdb_load_store() { let (mut conf, _) = neon_integration_test_conf(); test_observer::register_any(&mut conf); - let privks = vec![ + let privks = [ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R StacksPrivateKey::from_hex( "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", @@ -223,18 +216,18 @@ fn test_stackerdb_load_store() { // write some chunks and read them back for i in 0..3 { - let chunk_str = format!("Hello chunks {}", &i); + let chunk_str = format!("Hello chunks {i}"); let ack = post_stackerdb_chunk( &http_origin, &contract_id, chunk_str.as_bytes().to_vec(), &privks[0], 0, - (i + 1) as u32, + i + 1, ); - debug!("ACK: {:?}", &ack); + debug!("ACK: {ack:?}"); - let data = get_stackerdb_chunk(&http_origin, 
&contract_id, 0, Some((i + 1) as u32)); + let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some(i + 1)); assert_eq!(data, chunk_str.as_bytes().to_vec()); let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, None); @@ -252,7 +245,7 @@ fn test_stackerdb_event_observer() { let (mut conf, _) = neon_integration_test_conf(); test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]); - let privks = vec![ + let privks = [ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R StacksPrivateKey::from_hex( "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", @@ -383,11 +376,10 @@ fn test_stackerdb_event_observer() { // get events, verifying that they're all for the same contract (i.e. this one) let stackerdb_events: Vec<_> = test_observer::get_stackerdb_chunks() .into_iter() - .map(|stackerdb_event| { + .flat_map(|stackerdb_event| { assert_eq!(stackerdb_event.contract_id, contract_id); stackerdb_event.modified_slots }) - .flatten() .collect(); assert_eq!(stackerdb_events.len(), 6); @@ -396,7 +388,7 @@ fn test_stackerdb_event_observer() { assert_eq!(i as u32, event.slot_id); assert_eq!(event.slot_version, 1); - let expected_data = format!("Hello chunks {}", &i); + let expected_data = format!("Hello chunks {i}"); let expected_hash = Sha512Trunc256Sum::from_data(expected_data.as_bytes()); assert_eq!(event.data, expected_data.as_bytes().to_vec()); From 693b5daaa08d4275dfbffdd3a0a608e935d9ef7c Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 2 Nov 2024 17:39:07 -0400 Subject: [PATCH 053/109] test: improvements to empty sortition tests --- testnet/stacks-node/src/tests/signer/v0.rs | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5463d9fd6a..2f558bf890 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2728,13 +2728,9 @@ fn 
empty_sortition_before_approval() { info!("Found tenure extend block"); return Ok(true); } - TenureChangeCause::BlockFound => { - info!("Found block with tenure change"); - } + TenureChangeCause::BlockFound => {} }, - payload => { - info!("Found tx with payload: {:?}", payload); - } + _ => {} }; } Ok(false) @@ -2834,13 +2830,21 @@ fn empty_sortition_before_proposal() { info!("Pause miner so it doesn't propose a block before the next tenure arrives"); TEST_MINE_STALL.lock().unwrap().replace(true); + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + info!("------------------------- Test Mine Tenure A and B -------------------------"); signer_test .running_nodes .btc_regtest_controller .build_next_block(2); - // Sleep to ensure the signers see both burn blocks + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.burn_block_height == burn_height_before + 2) + }) + .expect("Failed to advance chain tip"); + + // Sleep a bit more to ensure the signers see both burn blocks sleep_ms(5_000); info!("Unpause miner"); @@ -2882,13 +2886,9 @@ fn empty_sortition_before_proposal() { info!("Found tenure extend block"); return Ok(true); } - TenureChangeCause::BlockFound => { - info!("Found block with tenure change"); - } + TenureChangeCause::BlockFound => {} }, - payload => { - info!("Found tx with payload: {:?}", payload); - } + _ => {} }; } Ok(false) From 3c812cf74c0e0c961611a938a1dce10bfbb35bac Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Sat, 2 Nov 2024 15:09:50 -0700 Subject: [PATCH 054/109] Fix logs to use inplace formatting in stacks node Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/stacks/tests/mod.rs | 58 +-- .../burnchains/bitcoin_regtest_controller.rs | 134 +++--- testnet/stacks-node/src/chain_data.rs | 33 +- testnet/stacks-node/src/config.rs | 114 ++--- testnet/stacks-node/src/event_dispatcher.rs | 53 +-- testnet/stacks-node/src/globals.rs | 7 +- 
testnet/stacks-node/src/keychain.rs | 2 +- testnet/stacks-node/src/main.rs | 54 +-- .../stacks-node/src/monitoring/prometheus.rs | 5 +- testnet/stacks-node/src/nakamoto_node.rs | 18 +- .../stacks-node/src/nakamoto_node/miner.rs | 31 +- testnet/stacks-node/src/nakamoto_node/peer.rs | 5 +- .../stacks-node/src/nakamoto_node/relayer.rs | 62 +-- .../src/nakamoto_node/sign_coordinator.rs | 13 +- testnet/stacks-node/src/neon_node.rs | 302 +++++------- testnet/stacks-node/src/node.rs | 21 +- testnet/stacks-node/src/operations.rs | 2 +- testnet/stacks-node/src/run_loop/mod.rs | 2 +- testnet/stacks-node/src/run_loop/nakamoto.rs | 37 +- testnet/stacks-node/src/run_loop/neon.rs | 80 ++-- testnet/stacks-node/src/stacks_events.rs | 14 +- testnet/stacks-node/src/syncctl.rs | 39 +- .../stacks-node/src/tests/bitcoin_regtest.rs | 3 +- testnet/stacks-node/src/tests/epoch_21.rs | 446 ++++++++---------- testnet/stacks-node/src/tests/epoch_22.rs | 100 ++-- testnet/stacks-node/src/tests/epoch_23.rs | 9 +- testnet/stacks-node/src/tests/epoch_24.rs | 10 +- testnet/stacks-node/src/tests/integrations.rs | 2 +- .../src/tests/neon_integrations.rs | 45 +- testnet/stacks-node/src/tests/signer/v0.rs | 4 +- testnet/stacks-node/src/tests/stackerdb.rs | 4 +- 31 files changed, 727 insertions(+), 982 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 6e6fdfd8f7..9a6a84507e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -81,7 +81,7 @@ pub fn path_join(dir: &str, path: &str) -> String { // copy src to dest pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { - eprintln!("Copy directory {} to {}", src_dir, dest_dir); + eprintln!("Copy directory {src_dir} to {dest_dir}"); let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); @@ -91,7 +91,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let 
next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); - eprintln!("mkdir {}", &next_dest_dir); + eprintln!("mkdir {next_dest_dir}"); fs::create_dir_all(&next_dest_dir)?; for dirent_res in fs::read_dir(&next_src_dir)? { @@ -100,11 +100,11 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let md = fs::metadata(&path)?; if md.is_dir() { let frontier = path_join(&next_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("push {}", &frontier); + eprintln!("push {frontier}"); dir_queue.push_back(frontier); } else { let dest_path = path_join(&next_dest_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("copy {} to {}", &path.to_str().unwrap(), &dest_path); + eprintln!("copy {} to {dest_path}", &path.to_str().unwrap()); fs::copy(path, dest_path)?; } } @@ -583,11 +583,10 @@ impl TestStacksNode { ); test_debug!( - "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {:?})", + "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {parent_block_snapshot_opt:?})", miner.id, block_commit_op.parent_block_ptr, - block_commit_op.parent_vtxindex, - &parent_block_snapshot_opt + block_commit_op.parent_vtxindex ); self.commit_ops.insert( block_commit_op.block_header_hash.clone(), @@ -767,16 +766,15 @@ pub fn preprocess_stacks_block_data( { Some(sn) => sn, None => { - test_debug!("Block commit did not win sorition: {:?}", block_commit_op); + test_debug!("Block commit did not win sorition: {block_commit_op:?}"); return None; } }; // "discover" this stacks block test_debug!( - "\n\nPreprocess Stacks block {}/{} ({})", + "\n\nPreprocess Stacks block {}/{block_hash} ({})", &commit_snapshot.consensus_hash, - &block_hash, StacksBlockHeader::make_index_block_hash(&commit_snapshot.consensus_hash, &block_hash) ); let block_res = node @@ -793,8 +791,7 @@ pub fn preprocess_stacks_block_data( // "discover" this stacks microblock stream for mblock in stacks_microblocks.iter() { 
test_debug!( - "Preprocess Stacks microblock {}-{} (seq {})", - &block_hash, + "Preprocess Stacks microblock {block_hash}-{} (seq {})", mblock.block_hash(), mblock.header.sequence ); @@ -828,11 +825,9 @@ pub fn check_block_state_index_root( .read_block_root_hash(&index_block_hash) .unwrap(); test_debug!( - "checking {}/{} state root: expecting {}, got {}", - consensus_hash, + "checking {consensus_hash}/{} state root: expecting {}, got {state_root}", &stacks_header.block_hash(), - &stacks_header.state_index_root, - &state_root + &stacks_header.state_index_root ); state_root == stacks_header.state_index_root } @@ -888,9 +883,8 @@ pub fn check_mining_reward( let mut total: u128 = 10_000_000_000 - spent_total; test_debug!( - "Miner {} has spent {} in total so far", - &miner.origin_address().unwrap(), - spent_total + "Miner {} has spent {spent_total} in total so far", + &miner.origin_address().unwrap() ); if block_height >= MINER_REWARD_MATURITY { @@ -908,13 +902,10 @@ pub fn check_mining_reward( let reward = recipient.coinbase + anchored + (3 * streamed / 5); test_debug!( - "Miner {} received a reward {} = {} + {} + {} at block {}", + "Miner {} received a reward {reward} = {} + {anchored} + {} at block {i}", &recipient.address.to_string(), - reward, recipient.coinbase, - anchored, (3 * streamed / 5), - i ); total += reward; found = true; @@ -922,9 +913,8 @@ pub fn check_mining_reward( } if !found { test_debug!( - "Miner {} received no reward at block {}", - miner.origin_address().unwrap(), - i + "Miner {} received no reward at block {i}", + miner.origin_address().unwrap() ); } } @@ -945,11 +935,9 @@ pub fn check_mining_reward( &parent_reward.block_hash, ); test_debug!( - "Miner {} received a produced-stream reward {} from {} confirmed at {}", + "Miner {} received a produced-stream reward {parent_streamed} from {} confirmed at {confirmed_block_height}", miner.origin_address().unwrap().to_string(), - parent_streamed, - heights.get(&parent_ibh).unwrap(), - 
confirmed_block_height + heights.get(&parent_ibh).unwrap() ); total += parent_streamed; } @@ -967,7 +955,7 @@ pub fn check_mining_reward( return total == 0; } else { if amount != total { - test_debug!("Amount {} != {}", amount, total); + test_debug!("Amount {amount} != {total}"); return false; } return true; @@ -1091,16 +1079,14 @@ pub fn make_smart_contract_with_version( (begin (var-set bar (/ x y)) (ok (var-get bar))))"; test_debug!( - "Make smart contract block at hello-world-{}-{}", - burnchain_height, - stacks_block_height + "Make smart contract block at hello-world-{burnchain_height}-{stacks_block_height}" ); let mut tx_contract = StacksTransaction::new( TransactionVersion::Testnet, miner.as_transaction_auth().unwrap(), TransactionPayload::new_smart_contract( - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), &contract.to_string(), version, ) @@ -1140,7 +1126,7 @@ pub fn make_contract_call( miner.as_transaction_auth().unwrap(), TransactionPayload::new_contract_call( addr.clone(), - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), "set-bar", vec![Value::Int(arg1), Value::Int(arg2)], ) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 06cc4799ff..582b46a2fd 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -135,16 +135,16 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { if let BitcoinAddress::Segwit(segwit_addr) = btc_addr { // regtest segwit addresses use a different hrp let s = segwit_addr.to_bech32_hrp("bcrt"); - warn!("Re-encoding {} to {}", &segwit_addr, &s); + warn!("Re-encoding {segwit_addr} to {s}"); s } else { - format!("{}", &btc_addr) + format!("{btc_addr}") } } 
#[cfg(not(test))] pub fn addr2str(btc_addr: &BitcoinAddress) -> String { - format!("{}", &btc_addr) + format!("{btc_addr}") } // TODO: add tests from mutation testing results #4862 @@ -317,15 +317,15 @@ impl BitcoinRegtestController { false, ); if let Err(err) = res { - error!("Unable to init block headers: {}", err); + error!("Unable to init block headers: {err}"); panic!() } let burnchain_params = burnchain_params_from_config(&config.burnchain); if network_id == BitcoinNetworkType::Mainnet && config.burnchain.epochs.is_some() { - panic!("It is an error to set custom epochs while running on Mainnet: network_id {:?} config.burnchain {:#?}", - &network_id, &config.burnchain); + panic!("It is an error to set custom epochs while running on Mainnet: network_id {network_id:?} config.burnchain {:#?}", + &config.burnchain); } let indexer_config = { @@ -462,7 +462,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::TrySyncAgain => { // try again immediately @@ -573,7 +573,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::CoordinatorClosed => { return Err(BurnchainControllerError::CoordinatorClosed) @@ -682,7 +682,7 @@ impl BitcoinRegtestController { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -783,7 +783,7 @@ impl BitcoinRegtestController { break utxos; } Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } @@ -814,13 +814,13 @@ impl BitcoinRegtestController { utxos = match result { Ok(utxos) => utxos, Err(e) => { - error!("Bitcoin RPC 
failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } }; - test_debug!("Unspent for {:?}: {:?}", &filter_addresses, &utxos); + test_debug!("Unspent for {filter_addresses:?}: {utxos:?}"); if utxos.is_empty() { return None; @@ -829,20 +829,14 @@ impl BitcoinRegtestController { } } } else { - debug!( - "Got {} UTXOs for {:?}", - utxos.utxos.len(), - &filter_addresses - ); + debug!("Got {} UTXOs for {filter_addresses:?}", utxos.utxos.len(),); utxos }; let total_unspent = utxos.total_available(); if total_unspent < total_required { warn!( - "Total unspent {} < {} for {:?}", - total_unspent, - total_required, + "Total unspent {total_unspent} < {total_required} for {:?}", &pubk.to_hex() ); return None; @@ -1495,7 +1489,7 @@ impl BitcoinRegtestController { let mut txid = tx.txid().as_bytes().to_vec(); txid.reverse(); - debug!("Transaction relying on UTXOs: {:?}", utxos); + debug!("Transaction relying on UTXOs: {utxos:?}"); let txid = Txid::from_bytes(&txid[..]).unwrap(); let mut txids = previous_txids.to_vec(); txids.push(txid); @@ -1507,12 +1501,11 @@ impl BitcoinRegtestController { }; info!( - "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {})", + "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {fee_rate})", txid.to_hex(), ongoing_block_commit.fees.is_rbf_enabled, ongoing_block_commit.fees.total_spent(), - ongoing_block_commit.fees.final_size, - fee_rate, + ongoing_block_commit.fees.final_size ); self.ongoing_block_commit = Some(ongoing_block_commit); @@ -1551,10 +1544,7 @@ impl BitcoinRegtestController { Ok(true) ); if ongoing_tx_confirmed { - debug!( - "Was able to retrieve confirmation of ongoing burnchain TXID - {}", - txid - ); + debug!("Was able to retrieve confirmation of ongoing burnchain TXID - {txid}"); let res = self.send_block_commit_operation( epoch_id, payload, @@ -1566,7 +1556,7 
@@ impl BitcoinRegtestController { ); return res; } else { - debug!("Was unable to retrieve ongoing TXID - {}", txid); + debug!("Was unable to retrieve ongoing TXID - {txid}"); }; } @@ -1715,10 +1705,9 @@ impl BitcoinRegtestController { Some(utxos) => utxos, None => { warn!( - "No UTXOs for {} ({}) in epoch {}", + "No UTXOs for {} ({}) in epoch {epoch_id}", &public_key.to_hex(), - &addr2str(&addr), - epoch_id + &addr2str(&addr) ); return Err(BurnchainControllerError::NoUTXOs); } @@ -1835,18 +1824,14 @@ impl BitcoinRegtestController { } if total_consumed < total_target { - warn!( - "Consumed total {} is less than intended spend: {}", - total_consumed, total_target - ); + warn!("Consumed total {total_consumed} is less than intended spend: {total_target}"); return false; } // Append the change output let value = total_consumed - tx_cost; debug!( - "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_target + "Payments value: {value:?}, total_consumed: {total_consumed:?}, total_spent: {total_target:?}" ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { @@ -1939,8 +1924,8 @@ impl BitcoinRegtestController { transaction.txid() }) .map_err(|e| { - error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) + error!("Bitcoin RPC error: transaction submission failed - {e:?}"); + BurnchainControllerError::TransactionSubmissionFailed(format!("{e:?}")) }) } @@ -1958,8 +1943,8 @@ impl BitcoinRegtestController { if debug_ctr % 10 == 0 { debug!( - "Waiting until canonical sortition height reaches {} (currently {})", - height_to_wait, canonical_sortition_tip.block_height + "Waiting until canonical sortition height reaches {height_to_wait} (currently {})", + canonical_sortition_tip.block_height ); } debug_ctr += 1; @@ -1993,7 +1978,7 @@ impl BitcoinRegtestController { /// Instruct a 
regtest Bitcoin node to build the next block. pub fn build_next_block(&self, num_blocks: u64) { - debug!("Generate {} block(s)", num_blocks); + debug!("Generate {num_blocks} block(s)"); let public_key_bytes = match &self.config.burnchain.local_mining_public_key { Some(public_key) => hex_bytes(public_key).expect("Invalid byte sequence"), None => panic!("Unable to make new block, mining public key"), @@ -2009,7 +1994,7 @@ impl BitcoinRegtestController { match result { Ok(_) => {} Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2017,7 +2002,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { - info!("Invalidating block {}", &block); + info!("Invalidating block {block}"); let request = BitcoinRPCRequest { method: "invalidateblock".into(), params: vec![json!(&block.to_string())], @@ -2025,7 +2010,7 @@ impl BitcoinRegtestController { jsonrpc: "2.0".into(), }; if let Err(e) = BitcoinRPCRequest::send(&self.config, request) { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2043,7 +2028,7 @@ impl BitcoinRegtestController { BurnchainHeaderHash::from_hex(v.get("result").unwrap().as_str().unwrap()).unwrap() } Err(e) => { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2138,7 +2123,7 @@ impl BitcoinRegtestController { num_blocks.try_into().unwrap(), addr2str(&address), ) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } return; @@ -2158,7 +2143,7 @@ impl BitcoinRegtestController { if let Err(e) = BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) { - error!("Bitcoin RPC failure: error generating 
block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2249,10 +2234,7 @@ impl BurnchainController for BitcoinRegtestController { // Evaluate process_exit_at_block_height setting if let Some(cap) = self.config.burnchain.process_exit_at_block_height { if burnchain_tip.block_snapshot.block_height >= cap { - info!( - "Node succesfully reached the end of the ongoing {} blocks epoch!", - cap - ); + info!("Node succesfully reached the end of the ongoing {cap} blocks epoch!"); info!("This process will automatically terminate in 30s, restart your node for participating in the next epoch."); sleep_ms(30000); std::process::exit(0); @@ -2333,8 +2315,7 @@ impl SerializedTx { } pub fn to_hex(&self) -> String { - let formatted_bytes: Vec = - self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); + let formatted_bytes: Vec = self.bytes.iter().map(|b| format!("{b:02x}")).collect(); formatted_bytes.join("").to_string() } } @@ -2367,7 +2348,7 @@ impl ParsedUTXO { Some(Sha256dHash::from(&txid[..])) } Err(err) => { - warn!("Unable to get txid from UTXO {}", err); + warn!("Unable to get txid from UTXO {err}"); None } } @@ -2396,7 +2377,7 @@ impl ParsedUTXO { Some(amount) } (lhs, rhs) => { - warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); + warn!("Error while converting BTC to sat {lhs:?} - {rhs:?}"); None } } @@ -2409,7 +2390,7 @@ impl ParsedUTXO { let base: u64 = 10; let int_part = amount / base.pow(8); let frac_part = amount % base.pow(8); - let amount = format!("{}.{:08}", int_part, frac_part); + let amount = format!("{int_part}.{frac_part:08}"); amount } @@ -2447,13 +2428,13 @@ type RPCResult = Result; impl From for RPCError { fn from(ioe: io::Error) -> Self { - Self::Network(format!("IO Error: {:?}", &ioe)) + Self::Network(format!("IO Error: {ioe:?}")) } } impl From for RPCError { fn from(ne: NetError) -> Self { - Self::Network(format!("Net Error: {:?}", &ne)) + Self::Network(format!("Net Error: {ne:?}")) } } @@ 
-2466,11 +2447,11 @@ impl BitcoinRPCRequest { _ => None, }; let url = config.burnchain.get_rpc_url(wallet_id); - Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {} as a URL", url)) + Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {url} as a URL")) }; debug!( - "BitcoinRPC builder '{}': {:?}:{:?}@{}", - &payload.method, &config.burnchain.username, &config.burnchain.password, &url + "BitcoinRPC builder '{}': {:?}:{:?}@{url}", + &payload.method, &config.burnchain.username, &config.burnchain.password ); let host = url @@ -2497,7 +2478,7 @@ impl BitcoinRPCRequest { if let (Some(username), Some(password)) = (&config.burnchain.username, &config.burnchain.password) { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); + let auth_token = format!("Basic {}", encode(format!("{username}:{password}"))); request.add_header("Authorization".into(), auth_token); } request @@ -2505,15 +2486,15 @@ impl BitcoinRPCRequest { #[cfg(test)] pub fn get_raw_transaction(config: &Config, txid: &Txid) -> RPCResult { - debug!("Get raw transaction {}", txid); + debug!("Get raw transaction {txid}"); let payload = BitcoinRPCRequest { method: "getrawtransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; let res = BitcoinRPCRequest::send(config, payload)?; - debug!("Got raw transaction {}: {:?}", txid, &res); + debug!("Got raw transaction {txid}: {res:?}"); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2521,7 +2502,7 @@ impl BitcoinRPCRequest { pub fn check_transaction_confirmed(config: &Config, txid: &Txid) -> RPCResult { let payload = BitcoinRPCRequest { method: "gettransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; @@ -2544,7 +2525,7 @@ impl BitcoinRPCRequest { } pub fn 
generate_to_address(config: &Config, num_blocks: u64, address: String) -> RPCResult<()> { - debug!("Generate {} blocks to {}", num_blocks, &address); + debug!("Generate {num_blocks} blocks to {address}"); let payload = BitcoinRPCRequest { method: "generatetoaddress".to_string(), params: vec![num_blocks.into(), address.clone().into()], @@ -2553,10 +2534,7 @@ impl BitcoinRPCRequest { }; let res = BitcoinRPCRequest::send(config, payload)?; - debug!( - "Generated {} blocks to {}: {:?}", - num_blocks, &address, &res - ); + debug!("Generated {num_blocks} blocks to {address}: {res:?}"); Ok(()) } @@ -2623,7 +2601,7 @@ impl BitcoinRPCRequest { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -2687,7 +2665,7 @@ impl BitcoinRPCRequest { if let Some(e) = json_resp.get("error") { if !e.is_null() { - error!("Error submitting transaction: {}", json_resp); + error!("Error submitting transaction: {json_resp}"); return Err(RPCError::Bitcoind(json_resp.to_string())); } } @@ -2743,7 +2721,7 @@ impl BitcoinRPCRequest { let payload = BitcoinRPCRequest { method: "importdescriptors".to_string(), params: vec![ - json!([{ "desc": format!("addr({})#{}", &addr2str(&address), &checksum), "timestamp": 0, "internal": true }]), + json!([{ "desc": format!("addr({})#{checksum}", &addr2str(&address)), "timestamp": 0, "internal": true }]), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), @@ -2772,7 +2750,7 @@ impl BitcoinRPCRequest { let parsed_wallet_name: String = match serde_json::from_value(entry) { Ok(wallet_name) => wallet_name, Err(err) => { - warn!("Failed parsing wallet name: {}", err); + warn!("Failed parsing wallet name: {err}"); continue; } }; @@ -3003,7 +2981,7 @@ mod tests { ) .unwrap(); - debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("send_block_commit_operation:\n{block_commit:#?}"); debug!("{}", 
&SerializedTx::new(block_commit.clone()).to_hex()); assert_eq!(block_commit.output[3].value, 323507); diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index c7fdaf6cee..cc60f964a3 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -114,8 +114,7 @@ impl MinerStats { } else { // PoX reward-phase is not active debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", - burn_block_height; + "Block {burn_block_height} is in a prepare phase or post-PoX sunset, so no windowing will take place" ); assert_eq!(windowed_block_commits.len(), 1); @@ -196,19 +195,19 @@ impl MinerStats { .stderr(Stdio::piped()) .args(args); - debug!("Run: `{:?}`", &cmd); + debug!("Run: `{cmd:?}`"); let output = cmd .spawn() - .map_err(|e| format!("Failed to run `{}`: {:?}", &full_args, &e))? + .map_err(|e| format!("Failed to run `{full_args}`: {e:?}"))? .wait_with_output() - .map_err(|ioe| format!("Failed to run `{}`: {:?}", &full_args, &ioe))?; + .map_err(|ioe| format!("Failed to run `{full_args}`: {ioe:?}"))?; let exit_code = match output.status.code() { Some(code) => code, None => { // failed due to signal - return Err(format!("Failed to run `{}`: killed by signal", &full_args)); + return Err(format!("Failed to run `{full_args}`: killed by signal")); } }; @@ -225,8 +224,8 @@ impl MinerStats { Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( - "Failed to run `{}`: exit code {}", - &self.unconfirmed_commits_helper, exit_code + "Failed to run `{}`: exit code {exit_code}", + &self.unconfirmed_commits_helper )); } @@ -234,9 +233,8 @@ impl MinerStats { let unconfirmed_commits: Vec = serde_json::from_slice(&stdout) .map_err(|e| { format!( - "Failed to decode output from `{}`: {:?}. Output was `{}`", + "Failed to decode output from `{}`: {e:?}. 
Output was `{}`", &self.unconfirmed_commits_helper, - &e, String::from_utf8_lossy(&stdout) ) })?; @@ -255,21 +253,20 @@ impl MinerStats { let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { - return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); + return Err(format!("Not a hex string: `{pox_addr_hex}`")); }; let Some(bitcoin_addr) = BitcoinAddress::from_scriptpubkey(BitcoinNetworkType::Mainnet, &pox_addr_bytes) else { return Err(format!( - "Not a recognized Bitcoin scriptpubkey: {}", - &pox_addr_hex + "Not a recognized Bitcoin scriptpubkey: {pox_addr_hex}" )); }; let Some(pox_addr) = PoxAddress::try_from_bitcoin_output(&BitcoinTxOutput { address: bitcoin_addr.clone(), units: 1, }) else { - return Err(format!("Not a recognized PoX address: {}", &bitcoin_addr)); + return Err(format!("Not a recognized PoX address: {bitcoin_addr}")); }; decoded_pox_addrs.push(pox_addr); } @@ -1042,7 +1039,7 @@ EOF ] { let spend = *spend_dist .get(miner) - .unwrap_or_else(|| panic!("no spend for {}", &miner)); + .unwrap_or_else(|| panic!("no spend for {miner}")); match miner.as_str() { "miner-1" => { assert_eq!(spend, 2); @@ -1057,7 +1054,7 @@ EOF assert_eq!(spend, 10); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } @@ -1075,7 +1072,7 @@ EOF ] { let prob = *win_probs .get(miner) - .unwrap_or_else(|| panic!("no probability for {}", &miner)); + .unwrap_or_else(|| panic!("no probability for {miner}")); match miner.as_str() { "miner-1" => { assert!((prob - (2.0 / 25.0)).abs() < 0.00001); @@ -1090,7 +1087,7 @@ EOF assert!((prob - (10.0 / 25.0)).abs() < 0.00001); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index d94dbd41de..6c8b2b6cb1 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ 
-108,7 +108,7 @@ pub struct ConfigFile { impl ConfigFile { pub fn from_path(path: &str) -> Result { - let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; + let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {e}"))?; let mut f = Self::from_str(&content)?; f.__path = Some(path.to_string()); Ok(f) @@ -117,7 +117,7 @@ impl ConfigFile { #[allow(clippy::should_implement_trait)] pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = - toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; + toml::from_str(content).map_err(|e| format!("Invalid toml: {e}"))?; if let Some(mstx_balance) = config.mstx_balance.take() { warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); match config.ustx_balance { @@ -393,24 +393,24 @@ impl Config { if let Some(first_burn_block_height) = self.burnchain.first_burn_block_height { debug!( - "Override first_block_height from {} to {}", - burnchain.first_block_height, first_burn_block_height + "Override first_block_height from {} to {first_burn_block_height}", + burnchain.first_block_height ); burnchain.first_block_height = first_burn_block_height; } if let Some(first_burn_block_timestamp) = self.burnchain.first_burn_block_timestamp { debug!( - "Override first_block_timestamp from {} to {}", - burnchain.first_block_timestamp, first_burn_block_timestamp + "Override first_block_timestamp from {} to {first_burn_block_timestamp}", + burnchain.first_block_timestamp ); burnchain.first_block_timestamp = first_burn_block_timestamp; } if let Some(first_burn_block_hash) = &self.burnchain.first_burn_block_hash { debug!( - "Override first_burn_block_hash from {} to {}", - burnchain.first_block_hash, first_burn_block_hash + "Override first_burn_block_hash from {} to {first_burn_block_hash}", + burnchain.first_block_hash ); burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid 
first_burn_block_hash"); @@ -428,8 +428,8 @@ impl Config { if let Some(v1_unlock_height) = self.burnchain.pox_2_activation { debug!( - "Override v1_unlock_height from {} to {}", - burnchain.pox_constants.v1_unlock_height, v1_unlock_height + "Override v1_unlock_height from {} to {v1_unlock_height}", + burnchain.pox_constants.v1_unlock_height ); burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } @@ -511,16 +511,16 @@ impl Config { if let Some(sunset_start) = self.burnchain.sunset_start { debug!( - "Override sunset_start from {} to {}", - burnchain.pox_constants.sunset_start, sunset_start + "Override sunset_start from {} to {sunset_start}", + burnchain.pox_constants.sunset_start ); burnchain.pox_constants.sunset_start = sunset_start.into(); } if let Some(sunset_end) = self.burnchain.sunset_end { debug!( - "Override sunset_end from {} to {}", - burnchain.pox_constants.sunset_end, sunset_end + "Override sunset_end from {} to {sunset_end}", + burnchain.pox_constants.sunset_end ); burnchain.pox_constants.sunset_end = sunset_end.into(); } @@ -595,7 +595,7 @@ impl Config { match Burnchain::new(&working_dir, &self.burnchain.chain, &network_name) { Ok(burnchain) => burnchain, Err(e) => { - error!("Failed to instantiate burnchain: {}", e); + error!("Failed to instantiate burnchain: {e}"); panic!() } } @@ -621,7 +621,7 @@ impl Config { assert!( v1_unlock_height > epoch21.start_height, - "FATAL: v1 unlock height occurs at or before pox-2 activation: {} <= {}\nburnchain: {:?}", v1_unlock_height, epoch21.start_height, burnchain + "FATAL: v1 unlock height occurs at or before pox-2 activation: {v1_unlock_height} <= {}\nburnchain: {burnchain:?}", epoch21.start_height ); let epoch21_rc = burnchain @@ -636,8 +636,7 @@ impl Config { // the reward cycle boundary. 
assert!( !burnchain.is_reward_cycle_start(v1_unlock_height), - "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {:?}", - burnchain + "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {burnchain:?}" ); } } @@ -679,7 +678,7 @@ impl Config { } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) } else { - Err(format!("Unknown epoch name specified: {}", epoch_name)) + Err(format!("Unknown epoch name specified: {epoch_name}")) }?; matched_epochs.push((epoch_id, configured_epoch.start_height)); } @@ -710,9 +709,7 @@ impl Config { .zip(matched_epochs.iter().map(|(epoch_id, _)| epoch_id)) { if expected_epoch != configured_epoch { - return Err(format!( - "Configured epochs may not skip an epoch. Expected epoch = {}, Found epoch = {}", - expected_epoch, configured_epoch)); + return Err(format!("Configured epochs may not skip an epoch. Expected epoch = {expected_epoch}, Found epoch = {configured_epoch}")); } } @@ -732,8 +729,8 @@ impl Config { for (i, (epoch_id, start_height)) in matched_epochs.iter().enumerate() { if epoch_id != &out_epochs[i].epoch_id { return Err( - format!("Unmatched epochs in configuration and node implementation. Implemented = {}, Configured = {}", - epoch_id, &out_epochs[i].epoch_id)); + format!("Unmatched epochs in configuration and node implementation. Implemented = {epoch_id}, Configured = {}", + &out_epochs[i].epoch_id)); } // end_height = next epoch's start height || i64::max if last epoch let end_height = if i + 1 < matched_epochs.len() { @@ -759,7 +756,7 @@ impl Config { .find(|&e| e.epoch_id == StacksEpochId::Epoch21) .ok_or("Cannot configure pox_2_activation if epoch 2.1 is not configured")?; if last_epoch.start_height > pox_2_activation as u64 { - Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. 
pox_2_activation = {}, epoch 2.1 start height = {}", pox_2_activation, last_epoch.start_height))?; + Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {pox_2_activation}, epoch 2.1 start height = {}", last_epoch.start_height))?; } } @@ -1267,14 +1264,11 @@ impl BurnchainConfig { false => "http://", }; let wallet_path = if let Some(wallet_id) = wallet.as_ref() { - format!("/wallet/{}", wallet_id) + format!("/wallet/{wallet_id}") } else { "".to_string() }; - format!( - "{}{}:{}{}", - scheme, self.peer_host, self.rpc_port, wallet_path - ) + format!("{scheme}{}:{}{wallet_path}", self.peer_host, self.rpc_port) } pub fn get_rpc_socket_addr(&self) -> SocketAddr { @@ -1505,15 +1499,14 @@ impl BurnchainConfigFile { // Using std::net::LookupHost would be preferable, but it's // unfortunately unstable at this point. // https://doc.rust-lang.org/1.6.0/std/net/struct.LookupHost.html - let mut sock_addrs = format!("{}:1", &peer_host) + let mut sock_addrs = format!("{peer_host}:1") .to_socket_addrs() - .map_err(|e| format!("Invalid burnchain.peer_host: {}", &e))?; + .map_err(|e| format!("Invalid burnchain.peer_host: {e}"))?; let sock_addr = match sock_addrs.next() { Some(addr) => addr, None => { return Err(format!( - "No IP address could be queried for '{}'", - &peer_host + "No IP address could be queried for '{peer_host}'" )); } }; @@ -1710,10 +1703,7 @@ impl CostEstimatorName { if &s.to_lowercase() == "naive_pessimistic" { CostEstimatorName::NaivePessimistic } else { - panic!( - "Bad cost estimator name supplied in configuration file: {}", - s - ); + panic!("Bad cost estimator name supplied in configuration file: {s}"); } } } @@ -1725,10 +1715,7 @@ impl FeeEstimatorName { } else if &s.to_lowercase() == "fuzzed_weighted_median_fee_rate" { FeeEstimatorName::FuzzedWeightedMedianFeeRate } else { - panic!( - "Bad fee estimator name supplied in configuration file: {}", - s - ); + panic!("Bad fee estimator name 
supplied in configuration file: {s}"); } } } @@ -1738,7 +1725,7 @@ impl CostMetricName { if &s.to_lowercase() == "proportion_dot_product" { CostMetricName::ProportionDotProduct } else { - panic!("Bad cost metric name supplied in configuration file: {}", s); + panic!("Bad cost metric name supplied in configuration file: {s}"); } } } @@ -1908,7 +1895,7 @@ impl Default for NodeConfig { rng.fill_bytes(&mut buf); let now = get_epoch_time_ms(); - let testnet_id = format!("stacks-node-{}", now); + let testnet_id = format!("stacks-node-{now}"); let rpc_port = 20443; let p2p_port = 20444; @@ -1923,11 +1910,11 @@ impl Default for NodeConfig { NodeConfig { name: name.to_string(), seed: seed.to_vec(), - working_dir: format!("/tmp/{}", testnet_id), - rpc_bind: format!("0.0.0.0:{}", rpc_port), - p2p_bind: format!("0.0.0.0:{}", p2p_port), - data_url: format!("http://127.0.0.1:{}", rpc_port), - p2p_address: format!("127.0.0.1:{}", rpc_port), + working_dir: format!("/tmp/{testnet_id}"), + rpc_bind: format!("0.0.0.0:{rpc_port}"), + p2p_bind: format!("0.0.0.0:{p2p_port}"), + data_url: format!("http://127.0.0.1:{rpc_port}"), + p2p_address: format!("127.0.0.1:{rpc_port}"), bootstrap_node: vec![], deny_nodes: vec![], local_peer_seed: local_peer_seed.to_vec(), @@ -2015,15 +2002,12 @@ impl NodeConfig { pub fn add_bootstrap_node(&mut self, bootstrap_node: &str, chain_id: u32, peer_version: u32) { let parts: Vec<&str> = bootstrap_node.split('@').collect(); if parts.len() != 2 { - panic!( - "Invalid bootstrap node '{}': expected PUBKEY@IP:PORT", - bootstrap_node - ); + panic!("Invalid bootstrap node '{bootstrap_node}': expected PUBKEY@IP:PORT"); } let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .unwrap_or_else(|_| panic!("Invalid public key '{pubkey_str}'")); - debug!("Resolve '{}'", &hostport); + debug!("Resolve '{hostport}'"); let mut attempts = 0; let max_attempts = 5; @@ -2035,22 +2019,16 @@ impl NodeConfig { if let Some(addr) = 
addrs.next() { break addr; } else { - panic!("No addresses found for '{}'", hostport); + panic!("No addresses found for '{hostport}'"); } } Err(e) => { if attempts >= max_attempts { - panic!( - "Failed to resolve '{}' after {} attempts: {}", - hostport, max_attempts, e - ); + panic!("Failed to resolve '{hostport}' after {max_attempts} attempts: {e}"); } else { error!( - "Attempt {} - Failed to resolve '{}': {}. Retrying in {:?}...", + "Attempt {} - Failed to resolve '{hostport}': {e}. Retrying in {delay:?}...", attempts + 1, - hostport, - e, - delay ); thread::sleep(delay); attempts += 1; @@ -2260,7 +2238,7 @@ impl ConnectionOptionsFile { public_ip_address .parse::() .map(|addr| (PeerAddress::from_socketaddr(&addr), addr.port())) - .map_err(|e| format!("Invalid connection_option.public_ip_address: {}", e)) + .map_err(|e| format!("Invalid connection_option.public_ip_address: {e}")) }) .transpose()?; let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS @@ -2641,7 +2619,7 @@ impl MinerConfigFile { |txs_to_consider_str| match str::parse(txs_to_consider_str) { Ok(txtype) => txtype, Err(e) => { - panic!("could not parse '{}': {}", &txs_to_consider_str, &e); + panic!("could not parse '{txs_to_consider_str}': {e}"); } }, ) @@ -2657,7 +2635,7 @@ impl MinerConfigFile { .map(|origin_str| match StacksAddress::from_string(origin_str) { Some(addr) => addr, None => { - panic!("could not parse '{}' into a Stacks address", origin_str); + panic!("could not parse '{origin_str}' into a Stacks address"); } }) .collect() @@ -2995,7 +2973,7 @@ mod tests { "#, ) .unwrap_err(); - println!("{}", err); + println!("{err}"); assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); } @@ -3016,7 +2994,7 @@ mod tests { fn test_example_confs() { // For each config file in the ../conf/ directory, we should be able to parse it let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); - println!("Reading config files from: {:?}", conf_dir); + println!("Reading 
config files from: {conf_dir:?}"); let conf_files = fs::read_dir(conf_dir).unwrap(); for entry in conf_files { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index dd587077a6..88bfc8dae7 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -380,7 +380,7 @@ impl EventObserver { } Err(err) => { // Log the error, then retry after a delay - warn!("Failed to insert payload into event observer database: {:?}", err; + warn!("Failed to insert payload into event observer database: {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -463,7 +463,7 @@ impl EventObserver { ); let url = Url::parse(full_url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {full_url} as a URL")); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -500,8 +500,7 @@ impl EventObserver { } Err(err) => { warn!( - "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err; + "Event dispatcher: connection or request failed to {host}:{port} - {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -555,11 +554,11 @@ impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { // Construct the full URL let url_str = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) + format!("{}{path}", &self.endpoint) } else { - format!("{}/{}", &self.endpoint, path) + format!("{}/{path}", &self.endpoint) }; - let full_url = format!("http://{}", url_str); + let full_url = format!("http://{url_str}"); if let Some(db_path) = &self.db_path { let conn = @@ -610,7 +609,7 @@ impl EventObserver { .collect(); json!({ - "burn_block_hash": format!("0x{}", burn_block), + "burn_block_hash": format!("0x{burn_block}"), "burn_block_height": burn_block_height, 
"reward_recipients": serde_json::Value::Array(reward_recipients), "reward_slot_holders": serde_json::Value::Array(reward_slot_holders), @@ -747,10 +746,10 @@ impl EventObserver { .collect(); let payload = json!({ - "parent_index_block_hash": format!("0x{}", parent_index_block_hash), + "parent_index_block_hash": format!("0x{parent_index_block_hash}"), "events": serialized_events, "transactions": serialized_txs, - "burn_block_hash": format!("0x{}", burn_block_hash), + "burn_block_hash": format!("0x{burn_block_hash}"), "burn_block_height": burn_block_height, "burn_block_timestamp": burn_block_timestamp, }); @@ -845,17 +844,17 @@ impl EventObserver { "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, - "miner_txid": format!("0x{}", winner_txid), + "miner_txid": format!("0x{winner_txid}"), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), "parent_block_hash": format!("0x{}", block.parent_block_hash), - "parent_index_block_hash": format!("0x{}", parent_index_hash), + "parent_index_block_hash": format!("0x{parent_index_hash}"), "parent_microblock": format!("0x{}", block.parent_microblock_hash), "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, - "parent_burn_block_hash": format!("0x{}", parent_burn_block_hash), + "parent_burn_block_hash": format!("0x{parent_burn_block_hash}"), "parent_burn_block_height": parent_burn_block_height, "parent_burn_block_timestamp": parent_burn_block_timestamp, "anchored_cost": anchored_consumed, @@ -1540,8 +1539,7 @@ impl EventDispatcher { modified_slots: Vec, ) { debug!( - "event_dispatcher: New StackerDB chunk events for {}: {:?}", - contract_id, modified_slots + "event_dispatcher: New StackerDB chunk events for {contract_id}: {modified_slots:?}" ); let 
interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); @@ -1582,7 +1580,7 @@ impl EventDispatcher { let dropped_txids: Vec<_> = txs .into_iter() - .map(|tx| serde_json::Value::String(format!("0x{}", &tx))) + .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); let payload = json!({ @@ -1875,8 +1873,7 @@ mod test { // Assert that the connection attempt timed out assert!( result.is_err(), - "Expected a timeout error, but got {:?}", - result + "Expected a timeout error, but got {result:?}" ); assert_eq!( result.unwrap_err().kind(), @@ -2134,7 +2131,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let request = server.recv().unwrap(); assert_eq!(request.url(), "/test"); @@ -2149,7 +2146,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2168,7 +2165,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; while let Ok(request) = server.recv() { @@ -2198,7 +2195,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2218,7 +2215,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); 
thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2242,7 +2239,7 @@ mod test { } }); - let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); + let observer = EventObserver::new(None, format!("127.0.0.1:{port}"), timeout); let payload = json!({"key": "value"}); @@ -2255,7 +2252,7 @@ mod test { // Record the time after the function returns let elapsed_time = start_time.elapsed(); - println!("Elapsed time: {:?}", elapsed_time); + println!("Elapsed time: {elapsed_time:?}"); assert!( elapsed_time >= timeout, "Expected a timeout, but the function returned too quickly" @@ -2281,9 +2278,9 @@ mod test { // Set up a channel to notify when the server has processed the request let (tx, rx) = channel(); - info!("Starting mock server on port {}", port); + info!("Starting mock server on port {port}"); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2334,7 +2331,7 @@ mod test { let observer = EventObserver::new( Some(working_dir.clone()), - format!("127.0.0.1:{}", port), + format!("127.0.0.1:{port}"), timeout, ); diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 3e527e76e4..c285c6a168 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -284,8 +284,7 @@ impl Globals { **leader_key_registration_state { info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid + "Received burnchain block #{burn_block_height} including key_register_op - {txid}" ); if txid == op.txid { let active_key = RegisteredKey { @@ -302,8 +301,8 @@ impl Globals { activated_key = Some(active_key); } else { debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid + "key_register_op {txid} does not match 
our pending op {}", + &op.txid ); } } diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 9402ebbad5..4e85750880 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -361,7 +361,7 @@ mod tests { let vrf_sk = match self.vrf_map.get(vrf_pk) { Some(vrf_pk) => vrf_pk, None => { - warn!("No VRF secret key on file for {:?}", vrf_pk); + warn!("No VRF secret key on file for {vrf_pk:?}"); return None; } }; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index e795101c94..4fa1c5e5a7 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -63,11 +63,11 @@ static GLOBAL: Jemalloc = Jemalloc; /// Implmentation of `pick_best_tip` CLI option fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCandidate { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -103,11 +103,11 @@ fn cli_get_miner_spend( mine_start: Option, at_burnchain_height: Option, ) -> u64 { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -181,7 +181,7 @@ fn cli_get_miner_spend( .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) @@ -195,10 +195,7 @@ fn cli_get_miner_spend( 
.map(|cmt| (format!("{}", &cmt.apparent_sender), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, @@ -231,10 +228,10 @@ fn cli_get_miner_spend( MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - config.miner.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + config.miner.fast_rampup ); let miner_addrs = BlockMinerThread::get_miner_addrs(&config, &keychain); @@ -245,8 +242,8 @@ fn cli_get_miner_spend( .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); win_prob }, @@ -257,9 +254,9 @@ fn cli_get_miner_spend( fn main() { panic::set_hook(Box::new(|panic_info| { - error!("Process abort due to thread panic: {}", panic_info); + error!("Process abort due to thread panic: {panic_info}"); let bt = Backtrace::new(); - error!("Panic backtrace: {:?}", &bt); + error!("Panic backtrace: {bt:?}"); // force a core dump #[cfg(unix)] @@ -287,10 +284,7 @@ fn main() { .expect("Failed to parse --mine-at-height argument"); if let Some(mine_start) = mine_start { - info!( - "Will begin mining once Stacks chain has synced to height >= {}", - mine_start - ); + info!("Will begin mining once Stacks chain has synced to height >= {mine_start}"); } let config_file = match subcommand.as_str() { @@ -313,14 +307,14 @@ fn main() { "check-config" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading 
config at path {config_path}"); let config_file = match ConfigFile::from_path(&config_path) { Ok(config_file) => { - debug!("Loaded config file: {:?}", config_file); + debug!("Loaded config file: {config_file:?}"); config_file } Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -330,7 +324,7 @@ fn main() { process::exit(0); } Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -338,11 +332,11 @@ fn main() { "start" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); match ConfigFile::from_path(&config_path) { Ok(config_file) => config_file, Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } } @@ -389,7 +383,7 @@ fn main() { args.finish(); let best_tip = cli_pick_best_tip(&config_path, at_stacks_height); - println!("Best tip is {:?}", &best_tip); + println!("Best tip is {best_tip:?}"); process::exit(0); } "get-spend-amount" => { @@ -399,7 +393,7 @@ fn main() { args.finish(); let spend_amount = cli_get_miner_spend(&config_path, mine_start, at_burnchain_height); - println!("Will spend {}", spend_amount); + println!("Will spend {spend_amount}"); process::exit(0); } _ => { @@ -411,7 +405,7 @@ fn main() { let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -425,7 +419,7 @@ fn main() { if conf.burnchain.mode == "helium" || conf.burnchain.mode == "mocknet" { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { - warn!("Helium runloop exited: {}", e); + warn!("Helium runloop exited: {e}"); } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git 
a/testnet/stacks-node/src/monitoring/prometheus.rs b/testnet/stacks-node/src/monitoring/prometheus.rs index e9705142d0..f91ac53bb4 100644 --- a/testnet/stacks-node/src/monitoring/prometheus.rs +++ b/testnet/stacks-node/src/monitoring/prometheus.rs @@ -20,10 +20,7 @@ pub fn start_serving_prometheus_metrics(bind_address: String) -> Result<(), Moni warn!("Prometheus monitoring: unable to get local bind address, will not spawn prometheus endpoint service."); MonitoringError::UnableToGetAddress })?; - info!( - "Prometheus monitoring: server listening on http://{}", - local_addr - ); + info!("Prometheus monitoring: server listening on http://{local_addr}"); let mut incoming = listener.incoming(); while let Some(stream) = incoming.next().await { diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index ecf37ae0ec..f92b571acf 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -131,7 +131,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -308,13 +308,13 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + 
op.apparent_sender, &op.block_header_hash ); } } @@ -359,7 +359,7 @@ impl StacksNode { } pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { - info!("Activated VRF key; saving to {}", path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); @@ -369,15 +369,15 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return; } }; if let Err(e) = f.write_all(key_json.as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + warn!("Failed to write activated VRF key to {path}: {e:?}"); return; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 042df70be1..1ab9e77f97 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -356,7 +356,7 @@ impl BlockMinerThread { // try again, in case a new sortition is pending self.globals - .raise_initiative(format!("MiningFailure: {:?}", &e)); + .raise_initiative(format!("MiningFailure: {e:?}")); return Err(NakamotoNodeError::MiningFailure( ChainstateError::MinerAborted, )); @@ -648,14 +648,14 @@ impl BlockMinerThread { } let block_id = block.block_id(); - debug!("Broadcasting block {}", &block_id); + debug!("Broadcasting block {block_id}"); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block.clone()], }), ) { - warn!("Failed to broadcast block {}: {:?}", &block_id, &e); + warn!("Failed to broadcast block {block_id}: {e:?}"); } Ok(()) } @@ -801,7 +801,7 @@ impl BlockMinerThread { // load up stacks 
chain tip let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { - error!("Failed to load canonical Stacks tip: {:?}", &e); + error!("Failed to load canonical Stacks tip: {e:?}"); NakamotoNodeError::ParentNotFound })?; @@ -813,8 +813,8 @@ impl BlockMinerThread { ) .map_err(|e| { error!( - "Could not query header info for tenure tip {} off of {}: {:?}", - &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + "Could not query header info for tenure tip {} off of {stacks_tip_block_id}: {e:?}", + &self.burn_election_block.consensus_hash ); NakamotoNodeError::ParentNotFound })?; @@ -842,8 +842,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header for parent tenure ID {}: {:?}", - &self.parent_tenure_id, &e + "Could not query header for parent tenure ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? @@ -858,7 +858,7 @@ impl BlockMinerThread { &parent_tenure_header.consensus_hash, ) .map_err(|e| { - error!("Could not query parent tenure finish block: {:?}", &e); + error!("Could not query parent tenure finish block: {e:?}"); NakamotoNodeError::ParentNotFound })?; if let Some(header) = header_opt { @@ -872,8 +872,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e + "Could not query header info for epoch2x tenure block ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? 
@@ -888,9 +888,8 @@ impl BlockMinerThread { }; debug!( - "Miner: stacks tip parent header is {} {:?}", - &stacks_tip_header.index_block_hash(), - &stacks_tip_header + "Miner: stacks tip parent header is {} {stacks_tip_header:?}", + &stacks_tip_header.index_block_hash() ); let miner_address = self .keychain @@ -974,8 +973,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) .map_err(|e| { error!( - "Could not query header info for parent block ID {}: {:?}", - &x.header.parent_block_id, &e + "Could not query header info for parent block ID {}: {e:?}", + &x.header.parent_block_id ); NakamotoNodeError::ParentNotFound })? diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 78deb69b9f..185ef9d6c0 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -243,8 +243,7 @@ impl PeerThread { let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.net.has_more_downloads() ); 1 @@ -321,7 +320,7 @@ impl PeerThread { Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. 
you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 441d7ecd2c..2967729340 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -475,7 +475,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB"); if cur_sn.consensus_hash != consensus_hash { - info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash); + info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash); self.globals .raise_initiative("process_sortition".to_string()); return Ok(None); @@ -571,15 +571,13 @@ impl RelayerThread { ) .map_err(|e| { error!( - "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", - &stacks_tip, &e + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}" ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { error!( - "Relayer: Failed to find tenure-start block header for stacks tip {}", - &stacks_tip + "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}" ); NakamotoNodeError::ParentNotFound })?; @@ -592,17 +590,11 @@ impl RelayerThread { tip_block_ch, ) .map_err(|e| { - error!( - "Failed to load VRF proof for {} off of {}: {:?}", - tip_block_ch, &stacks_tip, &e - ); + error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}"); NakamotoNodeError::ParentNotFound })? 
.ok_or_else(|| { - error!( - "No block VRF proof for {} off of {}", - tip_block_ch, &stacks_tip - ); + error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}"); NakamotoNodeError::ParentNotFound })?; @@ -615,7 +607,7 @@ impl RelayerThread { &self.burnchain, ) .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; @@ -759,8 +751,7 @@ impl RelayerThread { if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return Err(NakamotoNodeError::MissedMiningOpportunity); @@ -820,14 +811,14 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { - info!("Miner thread failed: {:?}", &e); + info!("Miner thread failed: {e:?}"); Err(e) } else { Ok(()) } }) .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); + error!("Relayer: Failed to start tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; debug!( @@ -853,7 +844,7 @@ impl RelayerThread { .name(format!("tenure-stop-{}", self.local_peer.data_url)) .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) .map_err(|e| { - error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; @@ -956,7 +947,7 @@ impl RelayerThread { return true; } Err(e) => { - warn!("Relayer: process_sortition returned {:?}", &e); + warn!("Relayer: process_sortition returned {e:?}"); return false; } }; @@ -1034,14 +1025,13 @@ impl RelayerThread { let (cur_stacks_tip_ch, cur_stacks_tip_bh) = 
SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( |e| { - panic!("Failed to load canonical stacks tip: {:?}", &e); + panic!("Failed to load canonical stacks tip: {e:?}"); }, ); if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { info!( - "Stacks tip changed prior to commit: {}/{} != {}/{}", - &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + "Stacks tip changed prior to commit: {cur_stacks_tip_ch}/{cur_stacks_tip_bh} != {tip_block_ch}/{tip_block_bh}" ); return Err(NakamotoNodeError::StacksTipChanged); } @@ -1051,16 +1041,12 @@ impl RelayerThread { &StacksBlockId::new(&tip_block_ch, &tip_block_bh), ) .map_err(|e| { - warn!( - "Relayer: failed to load tip {}/{}: {:?}", - &tip_block_ch, &tip_block_bh, &e - ); + warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}"); NakamotoNodeError::ParentNotFound })? .map(|header| header.stacks_block_height) else { warn!( - "Relayer: failed to load height for tip {}/{} (got None)", - &tip_block_ch, &tip_block_bh + "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)" ); return Err(NakamotoNodeError::ParentNotFound); }; @@ -1132,7 +1118,7 @@ impl RelayerThread { // load up canonical sortition and stacks tips let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical sortition tip: {:?}", &e); + error!("Failed to load canonical sortition tip: {e:?}"); e }) else { @@ -1142,7 +1128,7 @@ impl RelayerThread { // NOTE: this may be an epoch2x tip let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical stacks tip: {:?}", &e); + error!("Failed to load canonical stacks tip: {e:?}"); e }) else { @@ -1247,25 +1233,19 @@ impl RelayerThread { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, 
&e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice::(®istered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; @@ -1275,7 +1255,7 @@ impl RelayerThread { return None; } - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b2f892e1f1..14eeef20b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -91,7 +91,7 @@ impl SignCoordinator { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {:?}", &reward_set); + debug!("reward set: {reward_set:?}"); return Err(ChainstateError::NoRegisteredSigners(0)); }; @@ -357,9 +357,8 @@ impl SignCoordinator { .get_nakamoto_block(&block.block_id()) .map_err(|e| { warn!( - "Failed to query chainstate for block {}: {:?}", - &block.block_id(), - &e + "Failed to query chainstate for block {}: {e:?}", + &block.block_id() ); e }) @@ -551,8 +550,7 @@ impl SignCoordinator { }; responded_signers.insert(rejected_pubkey); debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "Signer {slot_id} rejected our block {}/{}", &block.header.consensus_hash, &block.header.block_hash() ); @@ -564,8 +562,7 @@ impl SignCoordinator { > self.total_weight { debug!( 
- "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, + "{total_reject_weight}/{} signers vote to reject our block {}/{}", self.total_weight, &block.header.consensus_hash, &block.header.block_hash() diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index efc64bf8e7..1f6e8fa1e9 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -310,10 +310,7 @@ pub(crate) fn fault_injection_long_tenure() { error!("Parse error for STX_TEST_SLOW_TENURE"); panic!(); }; - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); + info!("Fault injection: sleeping for {tenure_time} milliseconds to simulate a long tenure"); stacks_common::util::sleep_ms(tenure_time); } @@ -578,10 +575,7 @@ impl MicroblockMinerThread { // This is an artifact of the way the MARF is built (see #1449) let sortdb = SortitionDB::open(&burn_db_path, true, burnchain.pox_constants) .map_err(|e| { - error!( - "Relayer: Could not open sortdb '{}' ({:?}); skipping tenure", - &burn_db_path, &e - ); + error!("Relayer: Could not open sortdb '{burn_db_path}' ({e:?}); skipping tenure"); e }) .ok()?; @@ -589,8 +583,7 @@ impl MicroblockMinerThread { let mut chainstate = open_chainstate_with_faults(&config) .map_err(|e| { error!( - "Relayer: Could not open chainstate '{}' ({:?}); skipping microblock tenure", - &stacks_chainstate_path, &e + "Relayer: Could not open chainstate '{stacks_chainstate_path}' ({e:?}); skipping microblock tenure" ); e }) @@ -612,10 +605,7 @@ impl MicroblockMinerThread { .. } = miner_tip; - debug!( - "Relayer: Instantiate microblock mining state off of {}/{}", - &ch, &bhh - ); + debug!("Relayer: Instantiate microblock mining state off of {ch}/{bhh}"); // we won a block! 
proceed to build a microblock tail if we've stored it match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { @@ -664,17 +654,11 @@ impl MicroblockMinerThread { }) } Ok(None) => { - warn!( - "Relayer: No such anchored block: {}/{}. Cannot mine microblocks", - ch, bhh - ); + warn!("Relayer: No such anchored block: {ch}/{bhh}. Cannot mine microblocks"); None } Err(e) => { - warn!( - "Relayer: Failed to get anchored block cost for {}/{}: {:?}", - ch, bhh, &e - ); + warn!("Relayer: Failed to get anchored block cost for {ch}/{bhh}: {e:?}"); None } } @@ -726,7 +710,7 @@ impl MicroblockMinerThread { let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? .ok_or_else(|| { @@ -736,13 +720,13 @@ impl MicroblockMinerThread { let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {}", e); + error!("Failed to get AST rules for microblock: {e}"); e })?; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), burn_height) .map_err(|e| { - error!("Failed to get epoch for microblock: {}", e); + error!("Failed to get epoch for microblock: {e}"); e })? 
.expect("FATAL: no epoch defined") @@ -762,10 +746,10 @@ impl MicroblockMinerThread { Ok(x) => x, Err(e) => { let msg = format!( - "Failed to create a microblock miner at chaintip {}/{}: {:?}", - &self.parent_consensus_hash, &self.parent_block_hash, &e + "Failed to create a microblock miner at chaintip {}/{}: {e:?}", + &self.parent_consensus_hash, &self.parent_block_hash ); - error!("{}", msg); + error!("{msg}"); return Err(e); } }; @@ -794,7 +778,7 @@ impl MicroblockMinerThread { let (mined_microblock, new_cost) = match mint_result { Ok(x) => x, Err(e) => { - warn!("Failed to mine microblock: {}", e); + warn!("Failed to mine microblock: {e}"); return Err(e); } }; @@ -819,23 +803,23 @@ impl MicroblockMinerThread { // record this microblock somewhere if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let mblock_bits = mined_microblock.serialize_to_vec(); let mblock_bits_hex = to_hex(&mblock_bits); let mblock_json = format!( - r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, - &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash + r#"{{"microblock":"{mblock_bits_hex}","parent_consensus":"{}","parent_block":"{}"}}"#, + &self.parent_consensus_hash, &self.parent_block_hash ); file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write microblock bits to '{:?}'", &path) + panic!("FATAL: failed to write microblock bits to '{path:?}'") }); info!( "Fault injection: bad microblock {} saved to {}", @@ -933,11 +917,11 @@ impl MicroblockMinerThread { info!("Will keep polling 
mempool for transactions to include in a microblock"); } Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); + warn!("Failed to mine one microblock: {e:?}"); } } } else { - debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); + debug!("Will not mine microblocks yet -- have {num_attachable} attachable blocks that arrived in the last 10 minutes"); } self.last_mined = get_epoch_time_ms(); @@ -1435,8 +1419,7 @@ impl BlockMinerThread { { // This leaf does not confirm a previous-best-tip, so assign it the // worst-possible score. - info!("Tip #{} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", - i, + info!("Tip #{i} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, @@ -1496,13 +1479,11 @@ impl BlockMinerThread { } info!( - "Tip #{} {}/{} at {}:{} has score {} ({})", - i, + "Tip #{i} {}/{} at {}:{} has score {score} ({})", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, leaf_tip.stacks_height, - score, score_summaries.join(" + ").to_string() ); if score < u64::MAX { @@ -1527,8 +1508,8 @@ impl BlockMinerThread { .expect("FATAL: candidates should not be empty"); info!( - "Best tip is #{} {}/{}", - best_tip_idx, &best_tip.consensus_hash, &best_tip.anchored_block_hash + "Best tip is #{best_tip_idx} {}/{}", + &best_tip.consensus_hash, &best_tip.anchored_block_hash ); Some((*best_tip).clone()) } @@ -1690,9 +1671,9 @@ impl BlockMinerThread { if !force { // the chain tip hasn't changed since we attempted to build a block. Use what we // already have. 
- info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1701,24 +1682,24 @@ impl BlockMinerThread { // TODO: only consider rebuilding our anchored block if we (a) have // time, and (b) the new microblocks are worth more than the new BTC // fee minus the old BTC fee - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } } else if !force { // no microblock stream to confirm, 
and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no microblocks present", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + prev_block.anchored_block.txs.len(), prev_block.burn_hash); return None; } } else if self.burn_block.burn_header_hash == prev_block.burn_hash { // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + info!("Relayer: Stacks tip has changed to {parent_consensus_hash}/{} since we last tried to mine a block in {} at burn height {parent_block_burn_height}; attempt was {} (for Stacks tip {}/{})", + stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); // Since the chain tip has changed, we should try to mine a new block, even // if it has less transactions than the previous block we mined, since that @@ -1726,7 +1707,7 @@ impl BlockMinerThread { max_txs = 0; } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - 
&self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1822,9 +1803,7 @@ impl BlockMinerThread { Ok(x) => { let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); debug!( - "Loaded {} microblocks descending from {}/{} (data: {})", - num_mblocks, - parent_consensus_hash, + "Loaded {num_mblocks} microblocks descending from {parent_consensus_hash}/{} (data: {})", &stacks_parent_header.anchored_header.block_hash(), x.is_some() ); @@ -1832,10 +1811,8 @@ impl BlockMinerThread { } Err(e) => { warn!( - "Failed to load descendant microblock stream from {}/{}: {:?}", - parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &e + "Failed to load descendant microblock stream from {parent_consensus_hash}/{}: {e:?}", + &stacks_parent_header.anchored_header.block_hash() ); None } @@ -1855,7 +1832,7 @@ impl BlockMinerThread { stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { - debug!("Detected poisoned microblock fork: {:?}", &poison_payload); + debug!("Detected poisoned microblock fork: {poison_payload:?}"); // submit it multiple times with different nonces, so it'll have a good chance of // eventually getting picked up (even if the miner sends other transactions from @@ -1877,15 +1854,9 @@ impl BlockMinerThread { Some(&self.event_dispatcher), 1_000_000_000.0, // prioritize this for inclusion ) { - warn!( - "Detected but failed to mine poison-microblock transaction: {:?}", - &e - ); + warn!("Detected but failed to mine poison-microblock transaction: {e:?}"); } else { - debug!( - "Submit poison-microblock transaction {:?}", - &poison_microblock_tx - ); + debug!("Submit poison-microblock transaction {poison_microblock_tx:?}"); } } } @@ -1918,7 +1889,7 @@ impl BlockMinerThread { } btc_addrs 
.into_iter() - .map(|addr| format!("{}", &addr)) + .map(|addr| format!("{addr}")) .collect() } @@ -1951,7 +1922,7 @@ impl BlockMinerThread { }; let Ok(tip) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { - warn!("Failed to load canonical burn chain tip: {:?}", &e); + warn!("Failed to load canonical burn chain tip: {e:?}"); e }) else { return config_file_burn_fee_cap; @@ -1959,10 +1930,7 @@ impl BlockMinerThread { let tip = if let Some(at_burn_block) = at_burn_block.as_ref() { let ih = sortdb.index_handle(&tip.sortition_id); let Ok(Some(ancestor_tip)) = ih.get_block_snapshot_by_height(*at_burn_block) else { - warn!( - "Failed to load ancestor tip at burn height {}", - at_burn_block - ); + warn!("Failed to load ancestor tip at burn height {at_burn_block}"); return config_file_burn_fee_cap; }; ancestor_tip @@ -1972,7 +1940,7 @@ impl BlockMinerThread { let Ok(active_miners_and_commits) = MinerStats::get_active_miners(sortdb, at_burn_block) .map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); + warn!("Failed to get active miners: {e:?}"); e }) else { @@ -1988,12 +1956,12 @@ impl BlockMinerThread { .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(tip.block_height + 1, &active_miners) .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); + warn!("Failed to find unconfirmed block-commits: {e}"); e }) else { @@ -2005,10 +1973,7 @@ impl BlockMinerThread { .map(|cmt| (cmt.apparent_sender.to_string(), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, @@ -2034,7 +1999,7 @@ impl BlockMinerThread { at_burn_block, ) 
.map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + warn!("Failed to get unconfirmed burn distribution: {e:?}"); e }) else { @@ -2044,10 +2009,10 @@ impl BlockMinerThread { MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - miner_config.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + miner_config.fast_rampup ); let miner_addrs = Self::get_miner_addrs(config, keychain); @@ -2058,8 +2023,8 @@ impl BlockMinerThread { .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); set_prior_winning_prob(tip.block_height, win_prob); @@ -2082,8 +2047,7 @@ impl BlockMinerThread { let prior_win_prob = get_prior_winning_prob(prior_burn_height); if prior_win_prob < config.miner.target_win_probability { info!( - "Miner underperformed in block {} ({}/{})", - prior_burn_height, underperformed_count, underperform_stop_threshold + "Miner underperformed in block {prior_burn_height} ({underperformed_count}/{underperform_stop_threshold})" ); underperformed_count += 1; } @@ -2126,7 +2090,7 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); return None; } }; @@ -2533,10 +2497,7 @@ impl BlockMinerThread { if cfg!(test) { if let Ok(mblock_pubkey_hash_str) = std::env::var("STACKS_MICROBLOCK_PUBKEY_HASH") { if let Ok(bad_pubkh) = Hash160::from_hex(&mblock_pubkey_hash_str) { - debug!( - "Fault injection: set microblock public key hash to {}", - &bad_pubkh - ); + debug!("Fault injection: set microblock public key hash to {bad_pubkh}"); pubkh = bad_pubkh } } @@ -2621,13 +2582,13 @@ impl 
BlockMinerThread { ) { Ok(block) => block, Err(e) => { - error!("Relayer: Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + error!("Relayer: Failure mining anchor block even after removing offending microblock {mblock_header_hash}: {e}"); return None; } } } Err(e) => { - error!("Relayer: Failure mining anchored block: {}", e); + error!("Relayer: Failure mining anchored block: {e}"); return None; } }; @@ -2646,12 +2607,12 @@ impl BlockMinerThread { if miner_config.only_increase_tx_count && max_txs > u64::try_from(anchored_block.txs.len()).expect("too many txs") { - info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {} txs", anchored_block.txs.len(), max_txs); + info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {max_txs} txs", anchored_block.txs.len()); return None; } info!( - "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", + "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {attempt}", if parent_block_info.parent_block_total_burn == 0 { "Genesis" } else { @@ -2659,8 +2620,7 @@ impl BlockMinerThread { }, anchored_block.header.total_work.work, anchored_block.block_hash(), - anchored_block.txs.len(), - attempt + anchored_block.txs.len() ); // let's commit @@ -2777,7 +2737,7 @@ impl BlockMinerThread { return None; } Err(e) => { - warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + warn!("Relayer: Failed to submit Bitcoin transaction: {e:?}"); self.failed_to_submit_last_attempt = true; return None; } @@ -3075,7 +3035,7 @@ impl RelayerThread { let burn_height = SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? 
.ok_or_else(|| { @@ -3108,22 +3068,20 @@ impl RelayerThread { // record this block somewhere if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let block_bits = anchored_block.serialize_to_vec(); let block_bits_hex = to_hex(&block_bits); - let block_json = format!( - r#"{{"block":"{}","consensus":"{}"}}"#, - &block_bits_hex, &consensus_hash - ); + let block_json = + format!(r#"{{"block":"{block_bits_hex}","consensus":"{consensus_hash}"}}"#); file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write block bits to '{:?}'", &path) + panic!("FATAL: failed to write block bits to '{path:?}'") }); info!( "Fault injection: bad block {} saved to {}", @@ -3233,8 +3191,8 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); debug!( - "Relayer: Process tenure {}/{} in {} burn height {}", - &consensus_hash, &block_header_hash, &burn_hash, sn.block_height + "Relayer: Process tenure {consensus_hash}/{block_header_hash} in {burn_hash} burn height {}", + sn.block_height ); if let Some((last_mined_block_data, microblock_privkey)) = @@ -3251,8 +3209,7 @@ impl RelayerThread { let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; info!( - "Relayer: Won sortition! Mining reward will be received in {} blocks (block #{})", - MINER_REWARD_MATURITY, reward_block_height + "Relayer: Won sortition! 
Mining reward will be received in {MINER_REWARD_MATURITY} blocks (block #{reward_block_height})" ); debug!("Relayer: Won sortition!"; "stacks_header" => %block_header_hash, @@ -3271,7 +3228,7 @@ impl RelayerThread { return (false, None); } Err(e) => { - warn!("Error processing my tenure, bad block produced: {}", e); + warn!("Error processing my tenure, bad block produced: {e}"); warn!( "Bad block"; "stacks_header" => %block_header_hash, @@ -3293,7 +3250,7 @@ impl RelayerThread { }; if let Err(e) = self.relayer.advertize_blocks(blocks_available, block_data) { - warn!("Failed to advertise new block: {}", e); + warn!("Failed to advertise new block: {e}"); } let snapshot = SortitionDB::get_block_snapshot_consensus( @@ -3305,8 +3262,7 @@ impl RelayerThread { if !snapshot.pox_valid { warn!( - "Snapshot for {} is no longer valid; discarding {}...", - &consensus_hash, + "Snapshot for {consensus_hash} is no longer valid; discarding {}...", &mined_block.block_hash() ); miner_tip = Self::pick_higher_tip(miner_tip, None); @@ -3329,7 +3285,7 @@ impl RelayerThread { .relayer .broadcast_block(snapshot.consensus_hash, mined_block) { - warn!("Failed to push new block: {}", e); + warn!("Failed to push new block: {e}"); } } @@ -3352,8 +3308,7 @@ impl RelayerThread { } } else { debug!( - "Relayer: Did not win sortition in {}, winning block was {}/{}", - &burn_hash, &consensus_hash, &block_header_hash + "Relayer: Did not win sortition in {burn_hash}, winning block was {consensus_hash}/{block_header_hash}" ); miner_tip = None; } @@ -3488,11 +3443,9 @@ impl RelayerThread { || mtip.block_hash != stacks_tip_block_hash { debug!( - "Relayer: miner tip {}/{} is NOT canonical ({}/{})", + "Relayer: miner tip {}/{} is NOT canonical ({stacks_tip_consensus_hash}/{stacks_tip_block_hash})", &mtip.consensus_hash, &mtip.block_hash, - &stacks_tip_consensus_hash, - &stacks_tip_block_hash ); miner_tip = None; } else { @@ -3553,10 +3506,7 @@ impl RelayerThread { let best_tip = 
Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed - debug!( - "Relayer: Best miner tip went from {:?} to {:?}", - &my_miner_tip, &new_miner_tip - ); + debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); self.microblock_stream_cost = ExecutionCost::zero(); } self.miner_tip = best_tip; @@ -3656,14 +3606,14 @@ impl RelayerThread { for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { if assembled_block.burn_block_height < burn_height { debug!( - "Stale mined block: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Stale mined block: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); continue; } debug!( - "Mined block in-flight: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Mined block in-flight: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); } @@ -3728,8 +3678,7 @@ impl RelayerThread { if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return None; @@ -3745,8 +3694,7 @@ impl RelayerThread { ); if has_unprocessed { debug!( - "Relayer: Drop RunTenure for {} because there are fewer than {} pending blocks", - &burn_header_hash, + "Relayer: Drop RunTenure for {burn_header_hash} because there are fewer than {} pending blocks", self.burnchain.pox_constants.prepare_length - 1 ); return None; @@ -3776,7 +3724,7 @@ impl RelayerThread { // if we're still mining on this burn block, then do nothing if 
self.miner_thread.is_some() { - debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {}", &burn_chain_tip); + debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {burn_chain_tip}"); return None; } @@ -3824,7 +3772,7 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = miner_thread_state.send_mock_miner_messages() { - warn!("Failed to send mock miner messages: {}", e); + warn!("Failed to send mock miner messages: {e}"); } miner_thread_state.run_tenure() }) @@ -3947,10 +3895,7 @@ impl RelayerThread { let parent_consensus_hash = &miner_tip.consensus_hash; let parent_block_hash = &miner_tip.block_hash; - debug!( - "Relayer: Run microblock tenure for {}/{}", - parent_consensus_hash, parent_block_hash - ); + debug!("Relayer: Run microblock tenure for {parent_consensus_hash}/{parent_block_hash}"); let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self) else { @@ -4031,11 +3976,9 @@ impl RelayerThread { .set_ongoing_commit(ongoing_commit_opt); debug!( - "Relayer: RunTenure finished at {} (in {}ms) targeting {} (originally {})", + "Relayer: RunTenure finished at {} (in {}ms) targeting {bhh} (originally {orig_bhh})", self.last_tenure_issue_time, - self.last_tenure_issue_time.saturating_sub(tenure_begin), - &bhh, - &orig_bhh + self.last_tenure_issue_time.saturating_sub(tenure_begin) ); // this stacks block confirms all in-flight microblocks we know about, @@ -4064,11 +4007,9 @@ impl RelayerThread { ); info!( - "Mined one microblock: {} seq {} txs {} (total processed: {})", - µblock_hash, + "Mined one microblock: {microblock_hash} seq {} txs {} (total processed: {num_mblocks})", next_microblock.header.sequence, - next_microblock.txs.len(), - num_mblocks + next_microblock.txs.len() ); self.globals.counters.set_microblocks_processed(num_mblocks); @@ -4088,8 +4029,7 @@ impl RelayerThread { next_microblock, ) { error!( - "Failure 
trying to broadcast microblock {}: {}", - microblock_hash, e + "Failure trying to broadcast microblock {microblock_hash}: {e}" ); } @@ -4114,7 +4054,7 @@ impl RelayerThread { self.mined_stacks_block = false; } Err(e) => { - warn!("Relayer: Failed to mine next microblock: {:?}", &e); + warn!("Relayer: Failed to mine next microblock: {e:?}"); // switch back to block mining self.mined_stacks_block = false; @@ -4155,28 +4095,22 @@ impl RelayerThread { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice(®istered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } @@ -4335,9 +4269,9 @@ impl ParentStacksBlockInfo { return Err(Error::BurnchainTipChanged); } - debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {mine_tip_ch} (height {} hash {})", &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, - mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + parent_snapshot.block_height, &parent_snapshot.burn_header_hash); let coinbase_nonce = { let principal = miner_address.into(); @@ -4349,8 +4283,7 @@ impl ParentStacksBlockInfo { ) .unwrap_or_else(|| { panic!( - 
"BUG: stacks tip block {}/{} no longer exists after we queried it", - mine_tip_ch, mine_tip_bh + "BUG: stacks tip block {mine_tip_ch}/{mine_tip_bh} no longer exists after we queried it" ) }); account.nonce @@ -4545,8 +4478,7 @@ impl PeerThread { let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.get_network().has_more_downloads() ); 1 @@ -4630,7 +4562,7 @@ impl PeerThread { Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; @@ -4692,9 +4624,8 @@ impl StacksNode { pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height + "Override burnchain height of {:?} to {ast_precheck_size_height}", + ASTRules::PrecheckSize ); let mut tx = sortdb .tx_begin() @@ -4782,11 +4713,7 @@ impl StacksNode { stackerdb_contract_ids, ) .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); + eprintln!("Failed to open {}: {e:?}", &config.get_peer_db_file_path()); panic!(); }) .unwrap(); @@ -5035,7 +4962,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -5259,14 +5186,14 @@ impl StacksNode { for op in 
block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash ); } } @@ -5280,8 +5207,7 @@ impl StacksNode { let num_key_registers = key_registers.len(); debug!( - "Processed burnchain state at height {}: {} leader keys, {} block-commits (ibd = {})", - block_height, num_key_registers, num_block_commits, ibd + "Processed burnchain state at height {block_height}: {num_key_registers} leader keys, {num_block_commits} block-commits (ibd = {ibd})" ); // save the registered VRF key @@ -5297,7 +5223,7 @@ impl StacksNode { return ret; }; - info!("Activated VRF key; saving to {}", &path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); @@ -5307,17 +5233,17 @@ impl StacksNode { let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return ret; } }; if let Err(e) = f.write_all(key_json.as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + warn!("Failed to write activated VRF key to {path}: {e:?}"); return ret; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); ret } diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 
8aebd4814a..e2c920ce67 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -191,7 +191,7 @@ fn spawn_peer( let sortdb = match SortitionDB::open(&burn_db_path, false, pox_consts.clone()) { Ok(x) => x, Err(e) => { - warn!("Error while connecting burnchain db in peer loop: {}", e); + warn!("Error while connecting burnchain db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -204,7 +204,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting chainstate db in peer loop: {}", e); + warn!("Error while connecting chainstate db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -222,7 +222,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting to mempool db in peer loop: {}", e); + warn!("Error while connecting to mempool db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -319,9 +319,8 @@ impl Node { let (chain_state, receipts) = match chain_state_result { Ok(res) => res, Err(err) => panic!( - "Error while opening chain state at path {}: {:?}", - config.get_chainstate_path_str(), - err + "Error while opening chain state at path {}: {err:?}", + config.get_chainstate_path_str() ), }; @@ -419,7 +418,7 @@ impl Node { let initial_neighbors = self.config.node.bootstrap_node.clone(); - println!("BOOTSTRAP WITH {:?}", initial_neighbors); + println!("BOOTSTRAP WITH {initial_neighbors:?}"); let rpc_sock: SocketAddr = self.config.node.rpc_bind.parse().unwrap_or_else(|_| { @@ -785,15 +784,13 @@ impl Node { ) .unwrap_or_else(|_| { panic!( - "BUG: could not query chainstate to find parent consensus hash of {}/{}", - consensus_hash, + "BUG: could not query chainstate to find parent consensus hash of {consensus_hash}/{}", &anchored_block.block_hash() ) }) .unwrap_or_else(|| { panic!( - "BUG: no such parent of block {}/{}", - consensus_hash, + "BUG: no such parent of block {consensus_hash}/{}", 
&anchored_block.block_hash() ) }); @@ -848,7 +845,7 @@ impl Node { ) }; match process_blocks_at_tip { - Err(e) => panic!("Error while processing block - {:?}", e), + Err(e) => panic!("Error while processing block - {e:?}"), Ok(ref mut blocks) => { if blocks.is_empty() { break; diff --git a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs index 0109077a5f..7e26fb42e2 100644 --- a/testnet/stacks-node/src/operations.rs +++ b/testnet/stacks-node/src/operations.rs @@ -43,7 +43,7 @@ impl BurnchainOpSigner { let signature = match self.secret_key.sign(hash) { Ok(r) => r, Err(e) => { - debug!("Secret key error: {:?}", &e); + debug!("Secret key error: {e:?}"); return None; } }; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index ce4c06a16c..7990c04332 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -132,7 +132,7 @@ impl RunLoopCallbacks { match &tx.payload { TransactionPayload::Coinbase(..) => println!(" Coinbase"), TransactionPayload::SmartContract(contract, ..) 
=> println!(" Publish smart contract\n**************************\n{:?}\n**************************", contract.code_body), - TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {} µSTX to {}", amount, recipent), + TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {amount} µSTX to {recipent}"), _ => println!(" {:?}", tx.payload) } } diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index de836568d2..16f5a12b2d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -168,7 +168,7 @@ impl RunLoop { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { - warn!("Error when creating wallet: {:?}", e); + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -429,7 +429,7 @@ impl RunLoop { return; } Err(e) => { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return; } @@ -522,10 +522,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; let mut poll_deadline = 0; @@ -573,11 +570,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. 
debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -598,7 +594,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -612,15 +608,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -666,8 +660,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, 
burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -699,7 +692,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -709,9 +702,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -722,13 +713,11 @@ impl RunLoop { if last_tenure_sortition_height != sortition_db_height { if is_miner { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" ); } else { info!( - "Runloop: Synchronized full burnchain up to height {}.", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}." 
); } last_tenure_sortition_height = sortition_db_height; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 7be8939d9e..dd64fb5685 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -342,7 +342,7 @@ impl RunLoop { } } _ => { - let msg = format!("Graceful termination request received (signal `{}`), will complete the ongoing runloop cycles and terminate\n", sig_id); + let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n"); async_safe_write_stderr(&msg); keep_running_writer.store(false, Ordering::SeqCst); } @@ -353,7 +353,7 @@ impl RunLoop { if cfg!(test) || allow_err { info!("Error setting up signal handler, may have already been set"); } else { - panic!("FATAL: error setting termination handler - {}", e); + panic!("FATAL: error setting termination handler - {e}"); } } } @@ -370,7 +370,7 @@ impl RunLoop { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { - warn!("Error when creating wallet: {:?}", e); + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -461,7 +461,7 @@ impl RunLoop { panic!(); } Err(e) => { - panic!("FATAL: unable to query filesystem or databases: {:?}", &e); + panic!("FATAL: unable to query filesystem or databases: {e:?}"); } } @@ -475,13 +475,13 @@ impl RunLoop { Some(burnchain_tip) => { // database exists already, and has blocks -- just sync to its tip. 
let target_height = burnchain_tip.block_height + 1; - debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {}", burnchain_tip.block_height, target_height); + debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height); target_height } None => { // database does not exist yet let target_height = 1.max(burnchain_config.first_block_height + 1); - debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {}", target_height); + debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}"); target_height } }; @@ -492,16 +492,16 @@ impl RunLoop { if matches!(e, Error::CoordinatorClosed) && !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); + info!("Shutdown initiated during burnchain initialization: {e}"); return burnchain_error::ShutdownInitiated; } - error!("Burnchain controller stopped: {}", e); + error!("Burnchain controller stopped: {e}"); panic!(); })?; // if the chainstate DBs don't exist, this will instantiate them if let Err(e) = burnchain_controller.connect_dbs() { - error!("Failed to connect to burnchain databases: {}", e); + error!("Failed to connect to burnchain databases: {e}"); panic!(); }; @@ -739,7 +739,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ -755,7 +755,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -781,26 +781,24 @@ impl RunLoop { .find_divergence(&heaviest_affirmation_map) .is_some() { - 
debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})"); globals.coord().announce_new_burn_block(); } else if highest_sn.block_height == sn.block_height && sn.block_height == canonical_burnchain_tip.block_height { // need to force an affirmation reorg because there will be no more burn block // announcements. - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, burn height {})", sn.block_height); globals.coord().announce_new_burn_block(); } debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: possible PoX reorg (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); globals.coord().announce_new_stacks_block(); } else { debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: no need (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); // announce a new stacks block to force the chains coordinator @@ -871,7 +869,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -887,7 +885,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed 
to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ -902,7 +900,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); + warn!("Failed to find canonical affirmation map: {e:?}"); return; } }; @@ -913,7 +911,7 @@ impl RunLoop { .is_some() || sn.block_height < highest_sn.block_height { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = (heaviest_affirmation_map.len() as u64) { // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, canonical: {canonical_affirmation_map})"); globals.coord().announce_new_burn_block(); globals.coord().announce_new_stacks_block(); *last_announce_time = get_epoch_time_secs().into(); @@ -933,9 +931,7 @@ impl RunLoop { } } else { debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return None; } @@ -1160,10 +1156,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1202,7 +1195,7 @@ impl RunLoop { ) { Ok(ibd) => ibd, Err(e) => { - debug!("Runloop: PoX sync wait routine aborted: {:?}", e); + debug!("Runloop: PoX sync wait routine aborted: 
{e:?}"); continue; } }; @@ -1223,11 +1216,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -1242,7 +1234,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -1256,15 +1248,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -1337,8 +1327,7 @@ impl RunLoop { 
num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -1370,7 +1359,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -1380,9 +1369,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -1393,8 +1380,7 @@ impl RunLoop { // at tip, and not downloading. proceed to mine. if last_tenure_sortition_height != sortition_db_height { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. 
Proceeding to mine blocks" ); last_tenure_sortition_height = sortition_db_height; } diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs index 2f96bbfe66..d7ec349466 100644 --- a/testnet/stacks-node/src/stacks_events.rs +++ b/testnet/stacks-node/src/stacks_events.rs @@ -22,10 +22,7 @@ fn main() { if help { println!("Usage: stacks-events [--addr=]"); - println!( - " --addr= Address to listen on (default: {})", - DEFAULT_ADDR - ); + println!(" --addr= Address to listen on (default: {DEFAULT_ADDR})",); return; } @@ -34,7 +31,7 @@ fn main() { fn serve_for_events(addr: &String) { let listener = TcpListener::bind(addr).unwrap(); - eprintln!("Listening on {}", addr); + eprintln!("Listening on {addr}"); for stream in listener.incoming() { let stream = stream.unwrap(); handle_connection(stream); @@ -82,14 +79,13 @@ fn handle_connection(mut stream: TcpStream) { "path": path.unwrap(), "payload": payload_json, }); - println!("{}", record); + println!("{record}"); { let contents = "Thanks!"; let response = format!( - "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}", - contents.len(), - contents + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}", + contents.len() ); let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index d4c05ec7fe..395d829c8f 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -180,8 +180,7 @@ impl PoxSyncWatchdog { Ok(cs) => cs, Err(e) => { return Err(format!( - "Failed to open chainstate at '{}': {:?}", - &chainstate_path, &e + "Failed to open chainstate at '{chainstate_path}': {e:?}" )); } }; @@ -217,7 +216,7 @@ impl PoxSyncWatchdog { self.max_staging, self.last_attachable_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_attachable_query = 
get_epoch_time_secs(); Ok(cnt) @@ -233,7 +232,7 @@ impl PoxSyncWatchdog { self.max_staging, self.last_processed_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_processed_query = get_epoch_time_secs(); Ok(cnt) @@ -250,13 +249,13 @@ impl PoxSyncWatchdog { last_processed_height + (burnchain.stable_confirmations as u64) < burnchain_height; if ibd { debug!( - "PoX watchdog: {} + {} < {}, so initial block download", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} < {burnchain_height}, so initial block download", + burnchain.stable_confirmations ); } else { debug!( - "PoX watchdog: {} + {} >= {}, so steady-state", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} >= {burnchain_height}, so steady-state", + burnchain.stable_confirmations ); } ibd @@ -344,7 +343,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -372,7 +371,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -459,10 +458,7 @@ impl PoxSyncWatchdog { } if self.unconditionally_download { - debug!( - "PoX watchdog set to unconditionally download (ibd={})", - 
ibbd - ); + debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); self.relayer_comms.set_ibd(ibbd); return Ok(ibbd); } @@ -561,7 +557,7 @@ impl PoxSyncWatchdog { && get_epoch_time_secs() < expected_first_block_deadline { // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {}s for first Stacks block download (estimated download time: {}s)...", expected_first_block_deadline, self.estimated_block_download_time); + debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -596,8 +592,8 @@ impl PoxSyncWatchdog { let (flat_processed, processed_deviants) = PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - debug!("PoX watchdog: flat-attachable?: {}, flat-processed?: {}, estimated block-download time: {}s, estimated block-processing time: {}s", - flat_attachable, flat_processed, self.estimated_block_download_time, self.estimated_block_process_time); + debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", + self.estimated_block_download_time, self.estimated_block_process_time); if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { // we're flat-lining -- this may be the end of this cycle @@ -607,8 +603,8 @@ impl PoxSyncWatchdog { if self.last_block_processed_ts > 0 && get_epoch_time_secs() < expected_last_block_deadline { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, expected_last_block_deadline, self.estimated_block_process_time); + debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s 
before burnchain synchronization (estimated block-processing time: {}s)", + get_epoch_time_secs() + 1, self.estimated_block_process_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -617,8 +613,7 @@ impl PoxSyncWatchdog { // doing initial burnchain block download right now. // only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {}, flat-processed = {}, min-attachable: {}, min-processed: {}", - flat_attachable, flat_processed, &attachable_deviants, &processed_deviants); + debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); if !flat_attachable || !flat_processed { sleep_ms(PER_SAMPLE_WAIT_MS); @@ -645,7 +640,7 @@ impl PoxSyncWatchdog { } (err_attach, err_processed) => { // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{:?}') and/or processed ('{:?}') staging blocks", &err_attach, &err_processed); + error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); panic!(); } }; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 702f6d5953..3e69ac18cc 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -118,8 +118,7 @@ impl BitcoinCoreController { } } else { return Err(BitcoinCoreError::StopFailed(format!( - "Invalid response: {:?}", - res + "Invalid response: {res:?}" ))); } } diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index ebe14bae16..1ad23db5e1 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -128,7 
+128,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); mining_pubkey @@ -136,7 +136,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); btc_regtest_controller.bootstrap_chain(1); @@ -154,8 +154,8 @@ fn advance_to_2_1( .get_all_utxos(&Secp256k1PublicKey::from_hex(&mining_pubkey).unwrap()); eprintln!( - "UTXOs for {} (segwit={}): {:?}", - &mining_pubkey, conf.miner.segwit, &utxos + "UTXOs for {mining_pubkey} (segwit={}): {utxos:?}", + conf.miner.segwit ); assert_eq!(utxos.len(), 1); @@ -198,8 +198,8 @@ fn advance_to_2_1( let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
@@ -217,7 +217,7 @@ fn advance_to_2_1( ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip @@ -225,7 +225,6 @@ fn advance_to_2_1( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, tip_info.burn_block_height, - res ); if tip_info.burn_block_height >= epoch_2_1 { @@ -252,7 +251,7 @@ fn advance_to_2_1( true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -410,7 +409,7 @@ fn transition_adds_burn_block_height() { for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` - eprintln!("{:#?}", &cev); + eprintln!("{cev:#?}"); let clarity_serialized_value = hex_bytes( str::from_utf8( &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], @@ -734,15 +733,14 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height >= epoch_2_1 { @@ -778,7 +776,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); // costs-3 should NOT be initialized let e = get_contract_src( @@ -788,7 +786,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No costs-3: {}", &e); + eprintln!("No costs-3: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1093,8 +1091,7 @@ fn transition_adds_get_pox_addr_recipients() { let spender_sk = spender_sks[i]; let pox_addr_tuple = 
execute( &format!( - "{{ hashbytes: 0x{}, version: 0x{:02x} }}", - pox_pubkey_hash, + "{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x{:02x} }}", &(*addr_variant as u8) ), ClarityVersion::Clarity2, @@ -1136,7 +1133,7 @@ fn transition_adds_get_pox_addr_recipients() { } }; let pox_addr_tuple = execute( - &format!("{{ hashbytes: 0x{}, version: 0x{:02x} }}", &bytes, &version), + &format!("{{ hashbytes: 0x{bytes}, version: 0x{version:02x} }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1192,7 +1189,7 @@ fn transition_adds_get_pox_addr_recipients() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // mine through two reward cycles @@ -1200,7 +1197,7 @@ fn transition_adds_get_pox_addr_recipients() { while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let cc_tx = make_contract_call( @@ -1285,16 +1282,16 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(); // NOTE: there's an even number of payouts here, so this works - eprintln!("payout at {} = {}", burn_block_height, &payout); + eprintln!("payout at {burn_block_height} = {payout}"); if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase - eprintln!("{} in prepare phase", burn_block_height); + eprintln!("{burn_block_height} in prepare phase"); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); assert_eq!(pox_addr_tuples.len(), 1); } else { // in reward phase - eprintln!("{} in reward phase", burn_block_height); + eprintln!("{burn_block_height} in reward phase"); assert_eq!( payout, (conf.burnchain.burn_fee_cap / (OUTPUTS_PER_COMMIT 
as u64)) @@ -1309,7 +1306,7 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap_or_else(|| { panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") }); - eprintln!("at {}: {:?}", burn_block_height, &pox_addr); + eprintln!("at {burn_block_height}: {pox_addr:?}"); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); } @@ -1321,14 +1318,14 @@ fn transition_adds_get_pox_addr_recipients() { } } - eprintln!("found pox addrs: {:?}", &found_pox_addrs); + eprintln!("found pox addrs: {found_pox_addrs:?}"); assert_eq!(found_pox_addrs.len(), 7); for addr in found_pox_addrs .into_iter() .map(|addr| Value::Tuple(addr.as_clarity_tuple().unwrap())) { - eprintln!("Contains: {:?}", &addr); + eprintln!("Contains: {addr:?}"); assert!(expected_pox_addrs.contains(&addr.to_string())); } } @@ -1391,7 +1388,7 @@ fn transition_adds_mining_from_segwit() { ); if let Some(BitcoinAddress::Segwit(SegwitBitcoinAddress::P2WPKH(..))) = &utxo_addr { } else { - panic!("UTXO address was {:?}", &utxo_addr); + panic!("UTXO address was {utxo_addr:?}"); } } @@ -1424,7 +1421,7 @@ fn transition_adds_mining_from_segwit() { let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); - eprintln!("tx = {:?}", &tx); + eprintln!("tx = {tx:?}"); assert_eq!(tx.input[0].witness.len(), 2); let addr = BitcoinAddress::try_from_segwit( false, @@ -1577,7 +1574,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1591,18 +1588,18 @@ fn transition_removes_pox_sunset() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {}", sort_height); + eprintln!("Sort height pox-1: {sort_height}"); // advance to next reward cycle for _i in 0..(reward_cycle_len * 2 + 2) { next_block_and_wait(&mut 
btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info in pox-1 = {:?}", &pox_info); + eprintln!("pox_info in pox-1 = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); @@ -1610,7 +1607,7 @@ fn transition_removes_pox_sunset() { while sort_height <= epoch_21 + 1 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -1618,7 +1615,7 @@ fn transition_removes_pox_sunset() { // pox is still "active" despite unlock, because there's enough participation, and also even // though the v1 block height has passed, the pox-2 contract won't be managing reward sets // until the next reward cycle - eprintln!("pox_info in pox-2 = {:?}", &pox_info); + eprintln!("pox_info in pox-2 = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); @@ -1634,7 +1631,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1651,10 +1648,7 @@ fn transition_removes_pox_sunset() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!( - "Sort height pox-1 to pox-2 with stack-stx to pox-2: {}", - sort_height - ); + eprintln!("Sort height 
pox-1 to pox-2 with stack-stx to pox-2: {sort_height}"); let pox_info = get_pox_info(&http_origin).unwrap(); assert!(pox_info.current_cycle.is_pox_active); @@ -1663,11 +1657,11 @@ fn transition_removes_pox_sunset() { while sort_height <= epoch_21 + reward_cycle_len { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-2: {}", sort_height); + eprintln!("Sort height pox-2: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info = {:?}", &pox_info); + eprintln!("pox_info = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 @@ -1839,8 +1833,8 @@ fn transition_empty_blocks() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
@@ -1858,15 +1852,14 @@ fn transition_empty_blocks() { ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 || tip_info.burn_block_height == epoch_2_1 { @@ -1965,7 +1958,7 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.stacks_tip_height < max_stacks_tip { straggler = true; @@ -2095,9 +2088,9 @@ fn test_pox_reorgs_three_flaps() { let rpc_port = 41043 + 10 * i; let p2p_port = 41043 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); // conf.connection_options.inv_reward_cycles = 10; @@ -2111,9 +2104,8 @@ fn test_pox_reorgs_three_flaps() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2194,7 +2186,7 @@ fn test_pox_reorgs_three_flaps() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2209,16 
+2201,16 @@ fn test_pox_reorgs_three_flaps() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -2249,7 +2241,7 @@ fn test_pox_reorgs_three_flaps() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2273,7 +2265,7 @@ fn test_pox_reorgs_three_flaps() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -2285,7 +2277,7 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2296,7 +2288,7 @@ fn test_pox_reorgs_three_flaps() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -2304,7 +2296,7 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2313,7 +2305,7 @@ fn 
test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -2328,13 +2320,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2346,20 +2338,20 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. 
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2371,7 +2363,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2382,13 +2374,13 @@ fn test_pox_reorgs_three_flaps() { // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2400,7 +2392,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history continues to overtake miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2411,13 +2403,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2429,7 +2421,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. } @@ -2438,13 +2430,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2456,7 +2448,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map now becomes the heaviest. 
} @@ -2465,13 +2457,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2484,7 +2476,7 @@ fn test_pox_reorgs_three_flaps() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); @@ -2498,23 +2490,20 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; max tip is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; max tip is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -2619,9 +2608,9 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", 
rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -2633,9 +2622,8 @@ fn test_pox_reorg_one_flap() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2716,7 +2704,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2731,16 +2719,16 @@ fn test_pox_reorg_one_flap() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -2771,7 +2759,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2795,7 +2783,7 @@ fn test_pox_reorg_one_flap() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx 
{cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -2807,7 +2795,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2818,7 +2806,7 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -2826,7 +2814,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2835,7 +2823,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -2848,13 +2836,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2866,20 +2854,20 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2892,7 +2880,7 @@ fn test_pox_reorg_one_flap() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2908,23 +2896,20 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3029,9 +3014,9 @@ fn test_pox_reorg_flap_duel() { let rpc_port = 41083 + 10 * i; let p2p_port = 41083 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3044,9 +3029,8 @@ fn test_pox_reorg_flap_duel() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3127,7 +3111,7 @@ fn test_pox_reorg_flap_duel() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 
0 { break; } @@ -3142,16 +3126,16 @@ fn test_pox_reorg_flap_duel() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -3182,7 +3166,7 @@ fn test_pox_reorg_flap_duel() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3206,7 +3190,7 @@ fn test_pox_reorg_flap_duel() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -3218,7 +3202,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3229,7 +3213,7 @@ fn test_pox_reorg_flap_duel() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -3237,7 +3221,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3246,7 +3230,7 @@ fn 
test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -3268,13 +3252,13 @@ fn test_pox_reorg_flap_duel() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3287,20 +3271,20 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. 
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -3312,7 +3296,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3329,7 +3313,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3339,16 +3323,13 @@ fn test_pox_reorg_flap_duel() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. 
- eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3450,9 +3431,9 @@ fn test_pox_reorg_flap_reward_cycles() { let rpc_port = 41123 + 10 * i; let p2p_port = 41123 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3464,9 +3445,8 @@ fn test_pox_reorg_flap_reward_cycles() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3547,7 +3527,7 @@ fn test_pox_reorg_flap_reward_cycles() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3562,16 +3542,16 @@ fn test_pox_reorg_flap_reward_cycles() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if 
tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -3602,7 +3582,7 @@ fn test_pox_reorg_flap_reward_cycles() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3626,7 +3606,7 @@ fn test_pox_reorg_flap_reward_cycles() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -3638,7 +3618,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3649,7 +3629,7 @@ fn test_pox_reorg_flap_reward_cycles() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -3657,7 +3637,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3666,7 +3646,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle 
##############################"); @@ -3686,13 +3666,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 1 is disabled for this reward cycle signal_mining_blocked(miner_status[1].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } } @@ -3701,7 +3681,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3709,13 +3689,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 0 is disabled for this reward cycle signal_mining_blocked(miner_status[0].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } signal_mining_ready(miner_status[0].clone()); @@ -3723,7 +3703,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3740,7 +3720,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3750,16 +3730,13 @@ fn test_pox_reorg_flap_reward_cycles() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3864,9 +3841,9 @@ fn test_pox_missing_five_anchor_blocks() { let rpc_port = 41103 + 10 * i; let p2p_port = 41103 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3878,9 +3855,8 @@ fn test_pox_missing_five_anchor_blocks() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3961,7 +3937,7 @@ 
fn test_pox_missing_five_anchor_blocks() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3976,16 +3952,16 @@ fn test_pox_missing_five_anchor_blocks() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -4016,7 +3992,7 @@ fn test_pox_missing_five_anchor_blocks() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4040,7 +4016,7 @@ fn test_pox_missing_five_anchor_blocks() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -4052,7 +4028,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4063,7 +4039,7 @@ fn test_pox_missing_five_anchor_blocks() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); 
submit_tx(&http_origin, &tx); cnt += 1; } @@ -4071,7 +4047,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4080,7 +4056,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4095,13 +4071,13 @@ fn test_pox_missing_five_anchor_blocks() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {} cycle {}\n\n", i, c); + eprintln!("\n\nBuild block {i} cycle {c}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -4112,7 +4088,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4125,7 +4101,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: 
{tip_info:?}"); } // resume block propagation @@ -4133,16 +4109,13 @@ fn test_pox_missing_five_anchor_blocks() { // wait for all blocks to propagate. // miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}",); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4250,9 +4223,9 @@ fn test_sortition_divergence_pre_21() { let rpc_port = 41113 + 10 * i; let p2p_port = 41113 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -4264,9 +4237,8 @@ fn test_sortition_divergence_pre_21() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -4347,7 +4319,7 @@ fn test_sortition_divergence_pre_21() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4362,16 +4334,16 @@ fn test_sortition_divergence_pre_21() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); 
loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -4402,7 +4374,7 @@ fn test_sortition_divergence_pre_21() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4426,7 +4398,7 @@ fn test_sortition_divergence_pre_21() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -4438,7 +4410,7 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4449,7 +4421,7 @@ fn test_sortition_divergence_pre_21() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -4457,7 +4429,7 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4466,7 +4438,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = 
get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4481,13 +4453,13 @@ fn test_sortition_divergence_pre_21() { // mine a reward cycle in which the 2.05 rules choose a PoX anchor block, but the 2.1 rules do // not. for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len && i < reward_cycle_len - prepare_phase_len + 3 @@ -4516,26 +4488,26 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); 
@@ -4543,13 +4515,13 @@ fn test_sortition_divergence_pre_21() { // run some cycles in 2.1 for _ in 0..2 { for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } } @@ -4561,23 +4533,20 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate. // miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4767,8 +4736,7 @@ fn trait_invocation_cross_epoch() { "invoke-simple", "invocation-2", &[Value::Principal(PrincipalData::Contract( - QualifiedContractIdentifier::parse(&format!("{}.{}", &spender_addr_c32, "impl-simple")) - .unwrap(), + QualifiedContractIdentifier::parse(&format!("{spender_addr_c32}.impl-simple")).unwrap(), ))], ); let invoke_2_txid = submit_tx(&http_origin, &tx); @@ -4951,7 +4919,7 @@ fn test_v1_unlock_height_with_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 
}}"), ClarityVersion::Clarity2, ) .unwrap() @@ -4972,7 +4940,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until epoch 2.1 @@ -4988,7 +4956,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5009,7 +4977,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5043,7 +5011,7 @@ fn test_v1_unlock_height_with_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5207,7 +5175,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5228,7 +5196,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -5256,7 +5224,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5277,7 +5245,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5311,7 +5279,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5323,7 +5291,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 774a83f712..dabd3ee9ed 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -198,14 +198,14 @@ fn disable_pox() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -227,7 +227,7 @@ fn disable_pox() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -255,7 +255,7 @@ fn disable_pox() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -276,7 +276,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); let tx = make_contract_call( @@ -295,7 +295,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to 
{:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -322,7 +322,7 @@ fn disable_pox() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..15 { @@ -349,7 +349,7 @@ fn disable_pox() { &[Value::UInt(5000)], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // finish the cycle after the 2.2 transition, @@ -395,7 +395,7 @@ fn disable_pox() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -407,7 +407,7 @@ fn disable_pox() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); @@ -504,14 +504,12 @@ fn disable_pox() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, expected_slots[&reward_cycle][pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, 
- reward_cycle, + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -722,14 +720,14 @@ fn pox_2_unlock_all() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -751,7 +749,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -780,7 +778,7 @@ fn pox_2_unlock_all() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -812,7 +810,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -832,7 +830,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -970,7 +968,7 @@ fn pox_2_unlock_all() { 1_000_000, ); - info!("Submit stack transfer tx to {:?}", &http_origin); + info!("Submit stack transfer tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // 
this wakes up the node to mine the transaction @@ -1063,7 +1061,7 @@ fn pox_2_unlock_all() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1075,7 +1073,7 @@ fn pox_2_unlock_all() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); @@ -1154,18 +1152,16 @@ fn pox_2_unlock_all() { let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { Some(x) => x, None => { - info!("No reward cycle entry = {}", reward_cycle); + info!("No reward cycle entry = {reward_cycle}"); continue; } }; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, expected_slots[&reward_cycle][pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -1335,9 +1331,9 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -1350,9 +1346,8 @@ fn test_pox_reorg_one_flap() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -1433,7 +1428,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -1448,16 +1443,16 @@ fn test_pox_reorg_one_flap() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -1524,7 +1519,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, 
&tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -1535,7 +1530,7 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -1543,7 +1538,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -1552,7 +1547,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -1565,13 +1560,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1583,20 +1578,20 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1609,7 +1604,7 @@ fn test_pox_reorg_one_flap() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -1625,22 +1620,19 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index a0cbbfe876..12ae11945d 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -227,9 +227,8 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &publish_invoke); info!( - "At height = {}, epoch-2.1 = {}", - get_chain_info(&conf).burn_block_height, - epoch_2_1 + "At height = {}, epoch-2.1 = {epoch_2_1}", + get_chain_info(&conf).burn_block_height ); // wait until just before epoch 2.1 loop { @@ -509,7 +508,7 @@ fn trait_invocation_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Total spender txs = {}", spender_nonce); + info!("Total spender txs = {spender_nonce}"); let blocks = test_observer::get_blocks(); @@ -630,7 +629,7 @@ fn trait_invocation_behavior() { } for (key, value) in transaction_receipts.iter() { - eprintln!("{} => {} of {}", key, value.0, value.1); + eprintln!("{key} => {} of {}", value.0, 
value.1); } test_observer::clear(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index e39255678d..cfcc8d0d52 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -220,14 +220,14 @@ fn fix_to_pox_contract() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -249,7 +249,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -278,7 +278,7 @@ fn fix_to_pox_contract() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -299,7 +299,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 5a8de4d3bd..574b18e964 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -872,7 +872,7 @@ fn integration_test_get_info() { // explicit trait compliance let path = 
format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // No trait found diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 1f7252ec5f..167a66f7db 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -663,9 +663,8 @@ pub fn next_block_and_wait_with_timeout( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); info!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", + get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -692,9 +691,8 @@ pub fn next_block_and_iterate( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); eprintln!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", + get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -1064,7 +1062,7 @@ fn bitcoind_integration_test() { // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1708,7 +1706,7 @@ fn liquid_ustx_integration() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "ReplaceByFee"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", replaced_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{replaced_txid}")); // mine 1 burn block for the miner to issue the next block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); 
@@ -3555,9 +3553,8 @@ fn microblock_fork_poison_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); // NOTE: this microblock conflicts because it has the same parent as the first microblock, @@ -4015,7 +4012,7 @@ fn microblock_integration_test() { burn_blocks_with_burns.len() ); for burn_block in burn_blocks_with_burns { - eprintln!("{}", burn_block); + eprintln!("{burn_block}"); } let mut prior = None; @@ -5270,9 +5267,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-contract-{}-{ix}\")) ", - &format!("large-contract-{}-{ix}", &spender_addrs_c32[ix]) + &spender_addrs_c32[ix] ) )] } else { @@ -5325,8 +5322,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) - ", &format!("small-contract-{}-{ix}-{i}", &spender_addrs_c32[ix])) + (crash-me \"small-contract-{}-{ix}-{i}\")) + ", spender_addrs_c32[ix]) ); ret.push(tx); } @@ -7758,7 +7755,7 @@ fn atlas_integration_test() { let mut attachments_did_sync = false; let mut timeout = 60; while !attachments_did_sync { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client @@ -7872,7 +7869,7 @@ fn atlas_integration_test() { let user = StacksPrivateKey::new(); let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = format!("johndoe{}", i); + let name = format!("johndoe{i}"); let tx = make_contract_call( &user_1, 2 + i, @@ -9621,7 +9618,7 @@ fn test_problematic_txs_are_not_stored() { let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + 
(MAX_CALL_STACK_DEPTH as u64) - 1; let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); let tx_edge = make_contract_publish( &spender_sk_1, @@ -10477,8 +10474,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad block @@ -11225,8 +11222,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad microblock -- can't append subsequent blocks @@ -11514,7 +11511,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); - eprintln!("Make tx {}", &contract_name); + eprintln!("Make tx {contract_name}"); let tx = make_contract_publish_microblock_only( privk, nonce, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a704d2f2ee..941e492f37 100644 --- 
a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1520,7 +1520,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -3526,7 +3526,7 @@ fn multiple_miners_with_nakamoto_blocks() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index 70d820fbb1..c68b477b47 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -355,7 +355,7 @@ fn test_stackerdb_event_observer() { for i in 0..6 { let slot_id = i as u32; let privk = &privks[i / 3]; - let chunk_str = format!("Hello chunks {}", &i); + let chunk_str = format!("Hello chunks {i}"); let ack = post_stackerdb_chunk( &http_origin, &contract_id, @@ -364,7 +364,7 @@ fn test_stackerdb_event_observer() { slot_id, 1, ); - debug!("ACK: {:?}", &ack); + debug!("ACK: {ack:?}"); let data = get_stackerdb_chunk(&http_origin, &contract_id, slot_id, Some(1)); assert_eq!(data, chunk_str.as_bytes().to_vec()); From 55d923290668fd6b91d626787a515bcd0311020a Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Sat, 2 Nov 2024 22:09:41 -0400 Subject: [PATCH 055/109] feat: add `block_commit_delay_ms` config option This option defines the 
number of ms to wait after seeing a new burn block before submitting a block commit, allowing time for the first Nakamoto block to arrive so that the block commit does not need to be RBFed. Fixes: #5064 --- .github/workflows/bitcoin-tests.yml | 1 + CHANGELOG.md | 1 + testnet/stacks-node/src/config.rs | 6 + .../stacks-node/src/nakamoto_node/relayer.rs | 29 +++++ testnet/stacks-node/src/tests/signer/v0.rs | 116 +++++++++++++++++- 5 files changed, 151 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..dc06a64744 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -120,6 +120,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::multiple_miners_with_custom_chain_id + - tests::signer::v0::block_commit_delay - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/CHANGELOG.md b/CHANGELOG.md index ff5fdd588b..f4caf0149c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. 
## [3.0.0.0.0] diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 5df5de28f2..26782dd8b3 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -89,6 +89,7 @@ const INV_REWARD_CYCLES_TESTNET: u64 = 6; const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -2189,6 +2190,8 @@ pub struct MinerConfig { pub first_rejection_pause_ms: u64, /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. pub subsequent_rejection_pause_ms: u64, + /// Duration to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit. + pub block_commit_delay: Duration, } impl Default for MinerConfig { @@ -2221,6 +2224,7 @@ impl Default for MinerConfig { min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, + block_commit_delay: Duration::from_millis(DEFAULT_BLOCK_COMMIT_DELAY_MS), } } } @@ -2585,6 +2589,7 @@ pub struct MinerConfigFile { pub min_time_between_blocks_ms: Option, pub first_rejection_pause_ms: Option, pub subsequent_rejection_pause_ms: Option, + pub block_commit_delay_ms: Option, } impl MinerConfigFile { @@ -2700,6 +2705,7 @@ impl MinerConfigFile { }).unwrap_or(miner_default_config.min_time_between_blocks_ms), first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), + block_commit_delay: 
self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..16d05799e6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -235,6 +235,8 @@ pub struct RelayerThread { /// Information about the last-sent block commit, and the relayer's view of the chain at the /// time it was sent. last_committed: Option, + /// Timeout for waiting for the first block in a tenure before submitting a block commit + new_tenure_timeout: Option, } impl RelayerThread { @@ -292,6 +294,7 @@ impl RelayerThread { is_miner, next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, + new_tenure_timeout: None, } } @@ -1179,6 +1182,32 @@ impl RelayerThread { return None; } + if !highest_tenure_changed { + debug!("Relayer: burnchain view changed, but highest tenure did not"); + // The burnchain view changed, but the highest tenure did not, so + // wait a bit for the first block in the new tenure to arrive. This + // is to avoid submitting a block commit that will be immediately + // RBFed when the first block arrives. 
+ if let Some(new_tenure_timeout) = self.new_tenure_timeout { + debug!( + "Relayer: {}s elapsed since burn block arrival", + new_tenure_timeout.elapsed().as_secs(), + ); + if new_tenure_timeout.elapsed() < self.config.miner.block_commit_delay { + return None; + } + } else { + info!( + "Relayer: starting new tenure timeout for {}s", + self.config.miner.block_commit_delay.as_secs() + ); + let timeout = Instant::now() + self.config.miner.block_commit_delay; + self.new_tenure_timeout = Some(Instant::now()); + self.next_initiative = timeout; + return None; + } + } + // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit Some(RelayerDirective::IssueBlockCommit( stacks_tip_ch, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 234b73684a..36d6062ac8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -72,8 +72,8 @@ use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, + POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, @@ -5607,3 +5607,115 @@ fn multiple_miners_with_custom_chain_id() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks the behavior of the `block_commit_delay_ms` configuration option. 
+fn block_commit_delay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + // Set the block commit delay to 10 minutes to ensure no block commit is sent + config.miner.block_commit_delay = Duration::from_secs(600); + }, + None, + None, + ); + + signer_test.boot_to_epoch_3(); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine first block"); + + // Prevent a block from being mined by making signers reject it. + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(all_signers); + + info!("------------------------- Test Mine Burn Block -------------------------"); + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // Mine a burn block and wait for it to be processed. 
+ next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height > burn_height_before) + }, + ) + .unwrap(); + + // Sleep an extra minute to ensure no block commits are sent + sleep_ms(60_000); + + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + assert_eq!(commits, commits_before); + + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + info!("------------------------- Resume Signing -------------------------"); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + + // Wait for a block to be mined + wait_for(60, || { + let blocks = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + Ok(blocks > blocks_before) + }) + .expect("Timed out waiting for block to be mined"); + + // Wait for a block commit to be sent + wait_for(60, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits > commits_before) + }) + .expect("Timed out waiting for block commit after new Stacks block"); + + signer_test.shutdown(); +} From e19573628b31482f7fe6974c0b5e863bdbffec26 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 11:27:45 -0500 Subject: [PATCH 056/109] feat: remove panic in DB busy handler Instead of panicking after 5m, just print an error with a backtrace every 5 minutes. This is sufficient to detect the situation without the need to crash the node and potentially corrupt chainstate. 
--- stacks-common/src/util/db.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 89fe4677c7..53564af597 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -51,26 +51,25 @@ pub fn update_lock_table(conn: &Connection) { /// Called by `rusqlite` if we are waiting too long on a database lock /// If called too many times, will assume a deadlock and panic pub fn tx_busy_handler(run_count: i32) -> bool { - const TIMEOUT: Duration = Duration::from_secs(300); const AVG_SLEEP_TIME_MS: u64 = 100; + // Every ~5min, report an error with a backtrace + // 5min * 60s/min * 1_000ms/s / 100ms + const ERROR_COUNT: u32 = 3_000; + // First, check if this is taking unreasonably long. If so, it's probably a deadlock let run_count = run_count.unsigned_abs(); - let approx_time_elapsed = - Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); - if approx_time_elapsed > TIMEOUT { - error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + if run_count > 0 && run_count % ERROR_COUNT == 0 { + error!("Deadlock detected. 
Waited 5 minutes (estimated) for database lock."; "run_count" => run_count, "backtrace" => ?Backtrace::capture() ); for (k, v) in LOCK_TABLE.lock().unwrap().iter() { error!("Database '{k}' last locked by {v}"); } - panic!("Deadlock in thread {:?}", thread::current().name()); } let mut sleep_time_ms = 2u64.saturating_pow(run_count); - sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); if sleep_time_ms > AVG_SLEEP_TIME_MS { From 04f5c9d68e3d287dddd609b0f242ff220a55fc18 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 11:55:18 -0500 Subject: [PATCH 057/109] chore: update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff5fdd588b..e7caac38fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) ## [3.0.0.0.0] From 4fc99df7f1dd7d172514aa1d4d22331578839c07 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 14:00:21 -0500 Subject: [PATCH 058/109] fix: remove duplicate conditions Additional minor change I noticed when reviewing #5418. --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 6 +----- testnet/stacks-node/src/neon_node.rs | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 441d7ecd2c..63c931bba3 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -303,13 +303,9 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? 
- #[allow(clippy::nonminimal_bool)] - #[allow(clippy::eq_op)] fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index efc64bf8e7..8eaefbe432 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -2936,13 +2936,9 @@ impl RelayerThread { /// have we waited for the right conditions under which to start mining a block off of our /// chain tip? - #[allow(clippy::nonminimal_bool)] - #[allow(clippy::eq_op)] pub fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all From bedef8f45420a8d497b8ef77b73b3ed0e4ad22e2 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 15:43:37 -0500 Subject: [PATCH 059/109] chore: remove irrefutable if let This causes a warning in the latest versions of Rust. 
--- stackslib/src/net/api/getattachmentsinv.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs index 2ea73baf04..5f7dcc0cf8 100644 --- a/stackslib/src/net/api/getattachmentsinv.rs +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -96,11 +96,13 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler { if key == "index_block_hash" { index_block_hash = StacksBlockId::from_hex(&value).ok(); } else if key == "pages_indexes" { - if let Ok(pages_indexes_value) = value.parse::() { - for entry in pages_indexes_value.split(',') { - if let Ok(page_index) = entry.parse::() { - page_indexes.insert(page_index); - } + #[allow(clippy::expect_used)] + let pages_indexes_value = value + .parse::() + .expect("parse from Cow is always safe"); + for entry in pages_indexes_value.split(',') { + if let Ok(page_index) = entry.parse::() { + page_indexes.insert(page_index); } } } From e234d37eab1d0c7b10f0430cea9f70a604d5de07 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 15:54:03 -0500 Subject: [PATCH 060/109] chore: better fix to the irrefutable if let warning --- stackslib/src/net/api/getattachmentsinv.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs index 5f7dcc0cf8..b7fe94baf1 100644 --- a/stackslib/src/net/api/getattachmentsinv.rs +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -96,10 +96,7 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler { if key == "index_block_hash" { index_block_hash = StacksBlockId::from_hex(&value).ok(); } else if key == "pages_indexes" { - #[allow(clippy::expect_used)] - let pages_indexes_value = value - .parse::() - .expect("parse from Cow is always safe"); + let pages_indexes_value = value.to_string(); for entry in pages_indexes_value.split(',') { if let Ok(page_index) = entry.parse::() { 
page_indexes.insert(page_index); From 7a5d0792ccc68ba437f21dd8ac0aee76ae18b1cd Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 16:23:17 -0500 Subject: [PATCH 061/109] test: set `block_commit_delay` to 0 for tests that need to fork --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3e9f235424..8ed453efe1 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -4868,6 +4868,7 @@ fn forked_tenure_is_ignored() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); + naka_conf.miner.block_commit_delay = Duration::from_secs(0); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2362ff5617..e00de387db 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1762,6 +1762,7 @@ fn miner_forking() { config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.node.pox_sync_sample_secs = 30; config.burnchain.pox_reward_length = Some(max_sortitions as u32); + config.miner.block_commit_delay = Duration::from_secs(0); config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -4531,9 +4532,15 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let send_fee = 180; let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: 
SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |_| {}, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, ); let all_signers = signer_test .signer_stacks_private_keys From 606cb5af412e529c87f9dd2687f70045a2fb6fc9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Mon, 4 Nov 2024 17:09:36 -0500 Subject: [PATCH 062/109] test: improve check in `reorg_locally_accepted_blocks_across_tenures_succeeds` --- testnet/stacks-node/src/tests/signer/v0.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e00de387db..79721a37bf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4607,6 +4607,12 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { // Clear the stackerdb chunks test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer( &sender_sk, @@ -4619,11 +4625,6 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); wait_for(short_timeout, || { let ignored_signers = test_observer::get_stackerdb_chunks() .into_iter() From ca3b2ae990534b91535731a9453af14bf9fe4962 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:23:05 -0500 Subject: [PATCH 063/109] chore: API sync --- stackslib/src/net/chat.rs | 2 ++ 1 file changed, 2 insertions(+) diff 
--git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5cf32a8a56..60a1abcb4d 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3314,12 +3314,14 @@ mod test { let atlasdb = AtlasDB::connect(atlas_config, &atlasdb_path, true).unwrap(); let stackerdbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let peerdb = PeerDB::open(&peerdb_path, true).unwrap(); + let burnchain_db = burnchain.open_burnchain_db(false).unwrap(); let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let network = PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, peer_version, burnchain.clone(), From 06108f2ff1dab373d5cfe459820b19836b2e1de0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:23:20 -0500 Subject: [PATCH 064/109] fix: use burnchain tip reward cycle to infer whether or not to sync to current or next reward cycle --- stackslib/src/net/inv/nakamoto.rs | 37 ++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3f4fcb6165..87209e4496 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -579,10 +579,10 @@ impl NakamotoTenureInv { /// Reset synchronization state for this peer. 
Don't remove inventory data; just make it so we /// can talk to the peer again - pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, cur_rc: u64) { + pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, max_rc: u64) { let now = get_epoch_time_secs(); if self.start_sync_time + inv_sync_interval <= now - && (self.cur_reward_cycle >= cur_rc || !self.online) + && (self.cur_reward_cycle >= max_rc || !self.online) { self.reset_comms(start_rc); } @@ -618,20 +618,20 @@ impl NakamotoTenureInv { pub fn getnakamotoinv_begin( &mut self, network: &mut PeerNetwork, - current_reward_cycle: u64, + max_reward_cycle: u64, ) -> bool { debug!( "{:?}: Begin Nakamoto inventory sync for {} in cycle {}", network.get_local_peer(), self.neighbor_address, - current_reward_cycle, + max_reward_cycle, ); // possibly reset communications with this peer, if it's time to do so. self.try_reset_comms( network.get_connection_opts().inv_sync_interval, - current_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), - current_reward_cycle, + max_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), + max_reward_cycle, ); if !self.is_online() { // don't talk to this peer for now @@ -643,7 +643,7 @@ impl NakamotoTenureInv { return false; } - if self.reward_cycle() > current_reward_cycle { + if self.reward_cycle() > max_reward_cycle { // we've fully sync'ed with this peer debug!( "{:?}: fully sync'ed: {}", @@ -908,10 +908,24 @@ impl NakamotoInvStateMachine { ) }); - // try to get all of the reward cycles we know about, plus the next one. We try to get - // the next one as well in case we're at a reward cycle boundary, but we're not at the - // chain tip -- the block downloader still needs that next inventory to proceed. 
- let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle.saturating_add(1)); + let burnchain_tip_reward_cycle = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + network.stacks_tip.burnchain_height, + ) + .ok_or(NetError::ChainstateError( + "block height comes before system start".into(), + ))?; + + let max_reward_cycle = if burnchain_tip_reward_cycle > current_reward_cycle { + // try to sync up to the next reward cycle + current_reward_cycle.saturating_add(1) + } else { + current_reward_cycle + }; + + let proceed = inv.getnakamotoinv_begin(network, max_reward_cycle); let inv_rc = inv.reward_cycle(); new_inventories.insert(naddr.clone(), inv); @@ -946,6 +960,7 @@ impl NakamotoInvStateMachine { "peer" => ?naddr, "error" => ?e ); + continue; } } From 3369de5195091ab7af69dcd3c7275232206b9243 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:23:39 -0500 Subject: [PATCH 065/109] chore: API sync --- stackslib/src/net/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 41cdb16801..86f3d8578f 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3169,10 +3169,13 @@ pub mod test { let stackerdb_contracts: Vec<_> = stacker_db_syncs.keys().map(|cid| cid.clone()).collect(); + let burnchain_db = config.burnchain.open_burnchain_db(false).unwrap(); + let mut peer_network = PeerNetwork::new( peerdb, atlasdb, p2p_stacker_dbs, + burnchain_db, local_peer, config.peer_version, config.burnchain.clone(), From 0e1058ef7454a4b66638963da73c3c9001a8715e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:23:51 -0500 Subject: [PATCH 066/109] chore: store burnchain DB handle in p2p network and load burnchain height --- stackslib/src/net/p2p.rs | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 1f5320e60d..46821069cb 
100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -243,11 +243,18 @@ impl CurrentRewardSet { /// Cached stacks chain tip info, consumed by RPC endpoints #[derive(Clone, Debug, PartialEq)] pub struct StacksTipInfo { + /// consensus hash of the highest processed stacks block pub consensus_hash: ConsensusHash, + /// block hash of the highest processed stacks block pub block_hash: BlockHeaderHash, + /// height of the highest processed stacks block pub height: u64, + /// coinbase height of the highest processed tenure pub coinbase_height: u64, + /// whether or not the system has transitioned to Nakamoto pub is_nakamoto: bool, + /// highest burnchain block discovered + pub burnchain_height: u64, } impl StacksTipInfo { @@ -258,6 +265,7 @@ impl StacksTipInfo { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } @@ -306,6 +314,9 @@ pub struct PeerNetwork { pub peerdb: PeerDB, pub atlasdb: AtlasDB, + // handle to burnchain DB + pub burnchain_db: BurnchainDB, + // ongoing p2p conversations (either they reached out to us, or we to them) pub peers: PeerMap, pub sockets: HashMap, @@ -444,6 +455,7 @@ impl PeerNetwork { peerdb: PeerDB, atlasdb: AtlasDB, stackerdbs: StackerDBs, + burnchain_db: BurnchainDB, mut local_peer: LocalPeer, peer_version: u32, burnchain: Burnchain, @@ -509,6 +521,8 @@ impl PeerNetwork { peerdb, atlasdb, + burnchain_db, + peers: PeerMap::new(), sockets: HashMap::new(), events: HashMap::new(), @@ -4257,6 +4271,7 @@ impl PeerNetwork { .anchored_header .as_stacks_nakamoto() .is_some(), + burnchain_height: self.stacks_tip.burnchain_height, }; debug!( "{:?}: Parent Stacks tip off of {} is {:?}", @@ -4387,6 +4402,7 @@ impl PeerNetwork { let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; + let new_burnchain_tip = self.burnchain_db.get_canonical_chain_tip()?; let burnchain_tip_changed = canonical_sn.block_height != 
self.chain_view.burn_block_height || self.num_state_machine_passes == 0 || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; @@ -4465,6 +4481,7 @@ impl PeerNetwork { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } Err(e) => return Err(e), @@ -4536,12 +4553,10 @@ impl PeerNetwork { if self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update heaviest affirmation map view - let burnchain_db = self.burnchain.open_burnchain_db(false)?; - self.heaviest_affirmation_map = static_get_heaviest_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, ) @@ -4552,7 +4567,7 @@ impl PeerNetwork { self.tentative_best_affirmation_map = static_get_canonical_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, chainstate, &canonical_sn.sortition_id, @@ -4593,9 +4608,8 @@ impl PeerNetwork { if stacks_tip_changed && self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update stacks tip affirmation map view // (NOTE: this check has to happen _after_ self.chain_view gets updated!) 
- let burnchain_db = self.burnchain.open_burnchain_db(false)?; self.stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, &canonical_sn.canonical_stacks_tip_consensus_hash, @@ -4661,8 +4675,10 @@ impl PeerNetwork { height: stacks_tip_height, coinbase_height, is_nakamoto: stacks_tip_is_nakamoto, + burnchain_height: new_burnchain_tip.block_height, }; self.parent_stacks_tip = parent_stacks_tip; + self.parent_stacks_tip.burnchain_height = new_burnchain_tip.block_height; debug!( "{:?}: canonical Stacks tip is now {:?}", @@ -5299,12 +5315,14 @@ mod test { let atlas_config = AtlasConfig::new(false); let atlasdb = AtlasDB::connect_memory(atlas_config).unwrap(); let stacker_db = StackerDBs::connect_memory(); + let burnchain_db = burnchain.open_burnchain_db(false).unwrap(); let local_peer = PeerDB::get_local_peer(db.conn()).unwrap(); let p2p = PeerNetwork::new( db, atlasdb, stacker_db, + burnchain_db, local_peer, 0x12345678, burnchain, From ad4faaf8ffcd2efbc7deb89b468dc6cce25d4611 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:24:09 -0500 Subject: [PATCH 067/109] chore: API sync, and test fixes --- stackslib/src/net/tests/mod.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 11310ecd52..b8e9167ad9 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -537,7 +537,7 @@ impl NakamotoBootPlan { }) .collect(); - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -545,13 +545,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + 
old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = other_peer.network.stacks_tip.clone(); + let mut old_tip = other_peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -560,6 +561,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } @@ -572,7 +574,7 @@ impl NakamotoBootPlan { .burnchain .is_in_prepare_phase(sortition_height.into()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -580,13 +582,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } other_peers .iter_mut() .zip(other_peer_nonces.iter_mut()) .for_each(|(peer, nonce)| { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&[], nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -595,6 +598,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; 
assert_eq!(old_tip, peer.network.parent_stacks_tip); } }); @@ -607,7 +611,7 @@ impl NakamotoBootPlan { // advance to the start of epoch 3.0 while sortition_height < epoch_30_height - 1 { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&vec![], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -615,13 +619,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&vec![], other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -630,6 +635,8 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = + other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } From 4365ebf97ca684c49e67c7a6634a9a2b1c6b8058 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:24:17 -0500 Subject: [PATCH 068/109] chore: API sync --- testnet/stacks-node/src/neon_node.rs | 4 ++++ testnet/stacks-node/src/node.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b..a28969937d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -4908,6 +4908,9 @@ impl StacksNode { stackerdb_machines.insert(contract_id, (stackerdb_config, stacker_db_sync)); } 
let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, @@ -4918,6 +4921,7 @@ impl StacksNode { peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, config.burnchain.peer_version, burnchain, diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 1895912ba5..6ae9928263 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -489,11 +489,15 @@ impl Node { let event_dispatcher = self.event_dispatcher.clone(); let exit_at_block_height = self.config.burnchain.process_exit_at_block_height.clone(); + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let p2p_net = PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, self.config.burnchain.peer_version, burnchain.clone(), From 18be1fe0f0095c785074e5fc987a7b0121dafe32 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 17:34:54 -0500 Subject: [PATCH 069/109] chore: address PR feedback --- .../nakamoto/tenure_downloader_set.rs | 26 +++---------------- testnet/stacks-node/src/config.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 2 -- 3 files changed, 5 insertions(+), 25 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index c0b64cf5ce..971bf94db8 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -94,7 +94,7 @@ impl From<&mut NakamotoTenureDownloader> for CompletedTenure { } } -pub const PEER_DEPRIORITIZATION_TIME: u64 = 60; +pub const PEER_DEPRIORITIZATION_TIME_SECS: u64 = 60; /// A set of confirmed downloader state machines assigned to one or more neighbors. 
The block /// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure @@ -152,7 +152,7 @@ impl NakamotoTenureDownloaderSet { ) { deprioritized_peers.insert( peer.clone(), - get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME, + get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME_SECS, ); } @@ -482,20 +482,11 @@ impl NakamotoTenureDownloaderSet { continue; }; - let attempt_count = if let Some(attempt_count) = self.attempted_tenures.get(&ch) { - *attempt_count - } else { - 0 - }; + let attempt_count = *self.attempted_tenures.get(&ch).unwrap_or(&0); self.attempted_tenures .insert(ch.clone(), attempt_count.saturating_add(1)); - let attempt_failed_count = - if let Some(attempt_failed_count) = self.attempt_failed_tenures.get(&ch) { - *attempt_failed_count - } else { - 0 - }; + let attempt_failed_count = *self.attempt_failed_tenures.get(&ch).unwrap_or(&0); info!("Download tenure {}", &ch; "peer" => %naddr, @@ -511,15 +502,6 @@ impl NakamotoTenureDownloaderSet { "tenure_end_reward_cycle" => tenure_info.end_reward_cycle, "tenure_burn_height" => tenure_info.tenure_id_burn_block_height); - debug!( - "Download tenure {} (start={}, end={}) (rc {},{}) burn_height {}", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle, - tenure_info.tenure_id_burn_block_height, - ); let tenure_download = NakamotoTenureDownloader::new( ch.clone(), tenure_info.start_block_id.clone(), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 1256dec635..8d41d66f5c 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1940,7 +1940,7 @@ impl Default for NodeConfig { max_microblocks: u16::MAX as u64, wait_time_for_microblocks: 30_000, wait_time_for_blocks: 30_000, - next_initiative_delay: 1_000, + next_initiative_delay: 10_000, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: true, diff --git 
a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ae2eb4d483..8282e18031 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1208,7 +1208,6 @@ impl RelayerThread { }; let directive = if let Some(directive) = initiative_directive.take() { - debug!("Relayer: initiative from directive"); directive } else { // channel was drained, so do a time-bound recv @@ -1217,7 +1216,6 @@ impl RelayerThread { )) { Ok(directive) => { // only do this once, so we can call .initiative() again - debug!("Relayer: initiative from recv_timeout"); directive } Err(RecvTimeoutError::Timeout) => { From 67e254ea30fdb4949e9499944937e3f0d63f6449 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Nov 2024 15:27:51 -0800 Subject: [PATCH 070/109] Add config option tenure_last_block_proposal_timeout_secs and add a test that ensures reorgs can't happen before timeout Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/chainstate.rs | 61 ++++- stacks-signer/src/client/mod.rs | 1 + stacks-signer/src/config.rs | 19 +- stacks-signer/src/runloop.rs | 1 + stacks-signer/src/tests/chainstate.rs | 1 + .../src/tests/nakamoto_integrations.rs | 3 + testnet/stacks-node/src/tests/signer/v0.rs | 254 +++++++++++++++++- 8 files changed, 323 insertions(+), 18 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..d7713a72c8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -112,6 +112,7 @@ jobs: - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds + - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_fails - 
tests::signer::v0::miner_recovers_when_broadcast_block_delay_across_tenures_occurs - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 44ae11b252..93c29cb513 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -20,6 +20,7 @@ use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; use clarity::types::chainstate::BurnchainHeaderHash; +use clarity::util::get_epoch_time_secs; use slog::{slog_info, slog_warn}; use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; use stacks_common::util::hash::Hash160; @@ -27,7 +28,7 @@ use stacks_common::{info, warn}; use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; -use crate::signerdb::{BlockState, SignerDb}; +use crate::signerdb::{BlockInfo, BlockState, SignerDb}; #[derive(thiserror::Error, Debug)] /// Error type for the signer chainstate module @@ -119,6 +120,9 @@ pub struct ProposalEvalConfig { pub first_proposal_burn_block_timing: Duration, /// Time between processing a sortition and proposing a block before the block is considered invalid pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected before considering + /// a new miner's block at the same height as valid. 
+ pub tenure_last_block_proposal_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -126,6 +130,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, + tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, } } } @@ -460,7 +465,35 @@ impl SortitionsView { Ok(true) } - /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + /// Get the last block from the given tenure + /// Returns the last locally accepted block if it is not timed out, otherwise it will return the last globally accepted block. + fn get_tenure_last_block_info( + consensus_hash: &ConsensusHash, + signer_db: &SignerDb, + tenure_last_block_proposal_timeout: Duration, + ) -> Result, ClientError> { + // Get the last known block in the previous tenure + let last_locally_accepted_block = signer_db + .get_last_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + + if let Some(local_info) = last_locally_accepted_block { + if let Some(signed_over_time) = local_info.signed_self { + if signed_over_time + tenure_last_block_proposal_timeout.as_secs() + > get_epoch_time_secs() + { + return Ok(Some(local_info)); + } + } + } + + signer_db + .get_last_globally_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string())) + } + + /// Check if the tenure change block confirms the expected parent block + /// (i.e., the last locally accepted block in the parent tenure, or if that block is timed out, the last globally accepted block in the parent tenure) /// It checks the local DB first, and if the block is not present in the local DB, it asks the /// Stacks node for the highest processed block header in the given tenure (and then caches it /// in the DB). 
@@ -473,24 +506,27 @@ impl SortitionsView { reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, + tenure_last_block_proposal_timeout: Duration, ) -> Result { - // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last globally accepted block in the parent tenure. - let last_globally_accepted_block = signer_db - .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last accepted block in the parent tenure. + let last_block_info = Self::get_tenure_last_block_info( + &tenure_change.prev_tenure_consensus_hash, + signer_db, + tenure_last_block_proposal_timeout, + )?; - if let Some(global_info) = last_globally_accepted_block { + if let Some(info) = last_block_info { // N.B. this block might not be the last globally accepted block across the network; // it's just the highest one in this tenure that we know about. If this given block is // no higher than it, then it's definitely no higher than the last globally accepted // block across the network, so we can do an early rejection here. 
- if block.header.chain_length <= global_info.block.header.chain_length { + if block.header.chain_length <= info.block.header.chain_length { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, - "expected_at_least" => global_info.block.header.chain_length + 1, + "expected_at_least" => info.block.header.chain_length + 1, ); return Ok(false); } @@ -558,6 +594,7 @@ impl SortitionsView { reward_cycle, signer_db, client, + self.config.tenure_last_block_proposal_timeout, )?; if !confirms_expected_parent { return Ok(false); @@ -573,15 +610,15 @@ impl SortitionsView { if !is_valid_parent_tenure { return Ok(false); } - let last_in_tenure = signer_db + let last_in_current_tenure = signer_db .get_last_globally_accepted_block(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if let Some(last_in_tenure) = last_in_tenure { + if let Some(last_in_current_tenure) = last_in_current_tenure { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. 
Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_current_tenure.block.header.signer_signature_hash(), ); return Ok(false); } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 9885182d98..8df1d81daf 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -411,6 +411,7 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 7dd9cc4fdf..2fe8e48093 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -36,6 +36,7 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; +const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -128,6 +129,9 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. 
+ pub tenure_last_block_proposal_timeout: Duration, } /// The parsed configuration for the signer @@ -158,6 +162,9 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. + pub tenure_last_block_proposal_timeout: Duration, } /// Internal struct for loading up the config file @@ -180,13 +187,16 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option, - /// How much time must pass between the first block proposal in a tenure and the next bitcoin block + /// How much time must pass in seconds between the first block proposal in a tenure and the next bitcoin block /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option, /// How much time to wait for a miner to propose a block following a sortition in milliseconds pub block_proposal_timeout_ms: Option, /// An optional custom Chain ID pub chain_id: Option, + /// Time in seconds to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. 
+ pub tenure_last_block_proposal_timeout_secs: Option, } impl RawConfigFile { @@ -266,6 +276,12 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), ); + let tenure_last_block_proposal_timeout = Duration::from_secs( + raw_data + .tenure_last_block_proposal_timeout_secs + .unwrap_or(DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -279,6 +295,7 @@ impl TryFrom for GlobalConfig { first_proposal_burn_block_timing, block_proposal_timeout, chain_id: raw_data.chain_id, + tenure_last_block_proposal_timeout, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index a0e2b739e9..2901bb0052 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -283,6 +283,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo mainnet: self.config.network.is_mainnet(), db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, + tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, })) } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 886480f063..bec9f1258d 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -89,6 +89,7 @@ fn setup_test_environment( config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }, }; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3e9f235424..71de63c4e8 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6369,6 +6369,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), 
block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6507,6 +6508,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6584,6 +6586,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a704d2f2ee..6e97112ffb 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -453,6 +453,7 @@ fn block_proposal_rejection() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -4501,7 +4502,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { #[test] #[ignore] /// Test that signers that have accept a locally signed block N+1 built in tenure A can sign a block proposed during a -/// new tenure B built upon the last globally accepted block N, i.e. a reorg can occur at a tenure boundary. +/// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. 
a reorg can occur at a tenure boundary. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. @@ -4531,9 +4532,16 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let send_fee = 180; let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |_| {}, + None, + None, ); let all_signers = signer_test .signer_stacks_private_keys @@ -4593,6 +4601,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .take(num_signers * 7 / 10) .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS .lock() .unwrap() @@ -4618,7 +4631,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .get_peer_info() .expect("Failed to get peer info"); wait_for(short_timeout, || { - let ignored_signers = test_observer::get_stackerdb_chunks() + let accepted_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) .filter_map(|chunk| { @@ -4626,7 +4639,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to deserialize SignerMessage"); match message { SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - ignoring_signers.iter().find(|key| { + non_ignoring_signers.iter().find(|key| { key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) .is_ok() }) @@ -4635,7 +4648,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { } }) .collect::>(); - Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) + 
Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); @@ -4710,6 +4723,237 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Timed out waiting for block acceptance of N+1'"); } +#[test] +#[ignore] +/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary +/// before the specified timeout has been exceeded. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers reject as the timeout +/// has not been exceeded. +/// +/// Test Assertion: +/// Stacks tip remains at N.
+fn reorg_locally_accepted_blocks_across_tenures_fails() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 2; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Do not allow any reorg attempts essentially + config.tenure_last_block_proposal_timeout = Duration::from_secs(100_000); + }, + |_| {}, + None, + None, + ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() +
.expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Make more than 70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected + let ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 7 / 10) + .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(ignoring_signers.clone()); + // Clear the stackerdb chunks + test_observer::clear(); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + wait_for(short_timeout, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut
chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) + } + _ => None, + } + }) + .collect::>(); + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Attempt to mine Nakamoto Block N+1' in Tenure B -------------------------" + ); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // The miner's proposed block should get rejected by all the signers that PREVIOUSLY accepted the block + 
wait_for(short_timeout, || { + let rejected_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signature, + signer_signature_hash, + .. + })) => non_ignoring_signers + .iter() + .find(|key| key.verify(signer_signature_hash.bits(), &signature).is_ok()), + _ => None, + } + }) + .collect::>(); + Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal rejections"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n_1_prime); + assert_ne!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); +} + #[test] #[ignore] /// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure From 0fbe07fa07a37899f931f3f32e6f7850dbe4b585 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 4 Nov 2024 17:29:00 -0800 Subject: [PATCH 071/109] Fix signer_chainstate test and add it to CI Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + stacks-signer/src/chainstate.rs | 3 ++- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 +++--- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 
d7713a72c8..e2d888c2ec 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -134,6 +134,7 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint + - tests::nakamoto_integrations::signer_chainstate # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 93c29cb513..4159e2fe83 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -482,11 +482,12 @@ impl SortitionsView { if signed_over_time + tenure_last_block_proposal_timeout.as_secs() > get_epoch_time_secs() { + // The last locally accepted block is not timed out, return it return Ok(Some(local_info)); } } } - + // The last locally accepted block is timed out, get the last globally accepted block signer_db .get_last_globally_accepted_block(consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string())) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 71de63c4e8..6d70f5b51e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6543,10 +6543,10 @@ fn signer_chainstate() { valid: Some(true), signed_over: true, proposed_time: get_epoch_time_secs(), - signed_self: None, - signed_group: None, + signed_self: Some(get_epoch_time_secs()), + signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, - state: BlockState::Unprocessed, + state: BlockState::GloballyAccepted, }) .unwrap(); From bce9839e1668523db51811bbbb3a82b51b2c17d1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 4 Nov 2024 22:25:34 -0500 Subject: [PATCH 072/109] fix: disconnect from neighbors serving 
unconfirmed tenures that are stale (otherwise we would cease to make progress if the node never caught up), and throttle down unconfirmed download checks --- .../nakamoto/download_state_machine.rs | 46 +++++++++++++++---- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 7655a56ab5..c2b66beab1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -68,6 +68,9 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +/// How often to check for unconfirmed tenures +const CHECK_UNCONFIRMED_TENURES_MS: u128 = 1_000; + /// The overall downloader can operate in one of two states: /// * it's doing IBD, in which case it's downloading tenures using neighbor inventories and /// the start/end block ID hashes obtained from block-commits. This works up until the last two @@ -118,6 +121,10 @@ pub struct NakamotoDownloadStateMachine { pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip nakamoto_tip: StacksBlockId, + /// do we need to fetch unconfirmed tenures? 
+ fetch_unconfirmed_tenures: bool, + /// last time an unconfirmed tenures was checked + last_unconfirmed_download_check_ms: u128, /// last time an unconfirmed downloader was run last_unconfirmed_download_run_ms: u128, } @@ -139,6 +146,8 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, + fetch_unconfirmed_tenures: false, + last_unconfirmed_download_check_ms: 0, last_unconfirmed_download_run_ms: 0, } } @@ -1218,6 +1227,7 @@ impl NakamotoDownloadStateMachine { ) { Ok(blocks_opt) => blocks_opt, Err(NetError::StaleView) => { + neighbor_rpc.add_dead(network, &naddr); continue; } Err(e) => { @@ -1426,14 +1436,30 @@ impl NakamotoDownloadStateMachine { ); // check this now, since we mutate self.available - let need_unconfirmed_tenures = Self::need_unconfirmed_tenures( - burnchain_height, - &network.burnchain_tip, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &self.available_tenures, - ); + self.fetch_unconfirmed_tenures = if self + .last_unconfirmed_download_check_ms + .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) + > get_epoch_time_ms() + { + debug!( + "Throttle checking for unconfirmed tenures until {}", + self.last_unconfirmed_download_check_ms + .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) + / 1000 + ); + false + } else { + let do_fetch = Self::need_unconfirmed_tenures( + burnchain_height, + &network.burnchain_tip, + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.tenure_block_ids, + &self.available_tenures, + ); + self.last_unconfirmed_download_check_ms = get_epoch_time_ms(); + do_fetch + }; match self.state { NakamotoDownloadState::Confirmed => { @@ -1443,7 +1469,7 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); - if self.tenure_downloads.is_empty() && need_unconfirmed_tenures { + if self.tenure_downloads.is_empty() && 
self.fetch_unconfirmed_tenures { debug!( "Transition from {} to {}", &self.state, @@ -1488,7 +1514,7 @@ impl NakamotoDownloadStateMachine { } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { - if need_unconfirmed_tenures { + if self.fetch_unconfirmed_tenures { // do this again self.unconfirmed_tenure_download_schedule = Self::make_unconfirmed_tenure_download_schedule( From ead50a87b146403bcb2c8eaab67865d4546836b9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 5 Nov 2024 00:24:27 -0500 Subject: [PATCH 073/109] fix: fix unit tests that broke due to PeerNetwork needing an existing BurnchainDB --- stackslib/src/net/chat.rs | 284 ++++++++++++++++++++------------------ 1 file changed, 153 insertions(+), 131 deletions(-) diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 60a1abcb4d..273c1c7335 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3069,6 +3069,7 @@ mod test { use std::io::prelude::*; use std::io::{Read, Write}; use std::net::{SocketAddr, SocketAddrV4}; + use std::path::PathBuf; use clarity::vm::costs::ExecutionCost; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId}; @@ -3080,6 +3081,7 @@ mod test { use super::*; use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::burnchains::burnchain::*; + use crate::burnchains::db::BurnchainDB; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::*; @@ -3123,6 +3125,8 @@ mod test { let peerdb_path = format!("{}/peers.sqlite", &test_path); let stackerdb_path = format!("{}/stackerdb.sqlite", &test_path); let chainstate_path = format!("{}/chainstate", &test_path); + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), burnchain, true).unwrap(); let mut peerdb = PeerDB::connect( &peerdb_path, @@ -3333,7 +3337,7 @@ mod test { network } - fn testing_burnchain_config() -> Burnchain { + fn 
testing_burnchain_config(test_name: &str) -> Burnchain { let first_burn_hash = BurnchainHeaderHash::from_hex( "0000000000000000000000000000000000000000000000000000000000000000", ) @@ -3344,7 +3348,7 @@ mod test { network_id: 0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), + working_dir: format!("/tmp/stacks-test-databases-{}", test_name), consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_height: 12300, @@ -3368,8 +3372,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view_1 = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3399,10 +3401,13 @@ mod test { &peer_2_rc_consensus_hash ); + let burnchain_1 = testing_burnchain_config(&test_name_1); + let burnchain_2 = testing_burnchain_config(&test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3413,7 +3418,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3424,7 +3429,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3433,7 +3438,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3447,7 +3452,7 @@ mod test { peerdb_1 .update_local_peer( 0x9abcdef0, - burnchain.network_id, + burnchain_1.network_id, local_peer_1.data_url, local_peer_1.port, &[ @@ -3460,7 +3465,7 @@ mod test { peerdb_2 .update_local_peer( 0x9abcdef0, - burnchain.network_id, 
+ burnchain_2.network_id, local_peer_2.data_url, local_peer_2.port, &[ @@ -3492,7 +3497,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3502,7 +3507,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3710,8 +3715,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3725,10 +3728,13 @@ mod test { let test_name_1 = "convo_handshake_accept_1"; let test_name_2 = "convo_handshake_accept_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3739,7 +3745,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3750,7 +3756,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3759,7 +3765,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3773,7 +3779,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3783,7 +3789,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3889,8 +3895,6 @@ mod test { ) 
.unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3904,10 +3908,13 @@ mod test { let test_name_1 = "convo_handshake_reject_1"; let test_name_2 = "convo_handshake_reject_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3918,7 +3925,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3929,7 +3936,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3938,7 +3945,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3952,7 +3959,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3962,7 +3969,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4028,8 +4035,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4048,10 +4053,13 @@ mod test { let test_name_1 = "convo_handshake_badsignature_1"; let test_name_2 = "convo_handshake_badsignature_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - 
&burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4062,7 +4070,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4073,7 +4081,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4082,7 +4090,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4096,7 +4104,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4106,7 +4114,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4171,8 +4179,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4191,10 +4197,13 @@ mod test { let test_name_1 = "convo_handshake_badpeeraddress_1"; let test_name_2 = "convo_handshake_badpeeraddress_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4205,7 +4214,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4216,7 +4225,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4225,7 +4234,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + 
&burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4239,7 +4248,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4249,7 +4258,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4332,8 +4341,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4347,10 +4354,13 @@ mod test { let test_name_1 = "convo_handshake_update_key_1"; let test_name_2 = "convo_handshake_update_key_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4361,7 +4371,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4372,7 +4382,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4381,7 +4391,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4395,7 +4405,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4405,7 +4415,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4525,8 +4535,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: 
BurnchainHeaderHash([0x11; 32]), @@ -4545,10 +4553,13 @@ mod test { let test_name_1 = "convo_handshake_self_1"; let test_name_2 = "convo_handshake_self_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4559,7 +4570,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4570,7 +4581,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4579,7 +4590,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4593,7 +4604,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4603,7 +4614,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4668,8 +4679,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4688,10 +4697,13 @@ mod test { let test_name_1 = "convo_ping_1"; let test_name_2 = "convo_ping_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4702,7 +4714,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = 
make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4713,7 +4725,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4722,7 +4734,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4736,7 +4748,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4746,7 +4758,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4843,8 +4855,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4863,10 +4873,13 @@ mod test { let test_name_1 = "convo_handshake_ping_loop_1"; let test_name_2 = "convo_handshake_ping_loop_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4877,7 +4890,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4888,7 +4901,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4897,7 +4910,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4911,7 +4924,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, 
true, @@ -4921,7 +4934,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5069,8 +5082,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5089,10 +5100,13 @@ mod test { let test_name_1 = "convo_nack_unsolicited_1"; let test_name_2 = "convo_nack_unsolicited_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5103,7 +5117,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5114,7 +5128,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5123,7 +5137,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5137,7 +5151,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5147,7 +5161,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5218,8 +5232,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5237,10 +5249,14 @@ mod test { let test_name_1 = "convo_ignore_unsolicited_handshake_1"; let test_name_2 = "convo_ignore_unsolicited_handshake_2"; + + let burnchain_1 = 
testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5251,7 +5267,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5262,7 +5278,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5271,7 +5287,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5285,7 +5301,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5295,7 +5311,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5392,8 +5408,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5406,10 +5420,14 @@ mod test { let test_name_1 = "convo_handshake_getblocksinv_1"; let test_name_2 = "convo_handshake_getblocksinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5420,7 +5438,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5431,7 +5449,7 @@ mod test { 
let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5440,7 +5458,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5454,7 +5472,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5464,7 +5482,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5669,8 +5687,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5683,10 +5699,14 @@ mod test { let test_name_1 = "convo_handshake_getnakamotoinv_1"; let test_name_2 = "convo_handshake_getnakamotoinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5697,7 +5717,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5708,7 +5728,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5717,7 +5737,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5731,7 +5751,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5741,7 +5761,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, 
&conn_opts, true, @@ -5942,8 +5962,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5961,10 +5979,14 @@ mod test { let test_name_1 = "convo_natpunch_1"; let test_name_2 = "convo_natpunch_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12352, "http://peer1.com".into(), @@ -5975,7 +5997,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12353, "http://peer2.com".into(), @@ -5986,7 +6008,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5995,7 +6017,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -6009,7 +6031,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -6019,7 +6041,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -6083,8 +6105,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6096,6 +6116,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "convo_is_preamble_valid"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, chainstate_1) = make_test_chain_dbs( test_name_1, @@ -6364,7 +6386,7 @@ mod test { ) .unwrap(); - let burnchain = 
testing_burnchain_config(); + let burnchain = testing_burnchain_config("unused"); let mut chain_view = BurnchainView { burn_block_height: 12348, @@ -6750,8 +6772,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6763,6 +6783,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_relay_forward_message_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6868,8 +6890,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6881,6 +6901,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_and_forward_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6935,8 +6957,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6948,6 +6968,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_block_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7069,8 +7091,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7082,6 +7102,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_transaction_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, 
stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7203,8 +7225,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7216,6 +7236,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_microblocks_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7337,8 +7359,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7350,6 +7370,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_stackerdb_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, From 1aa67938803013cd6d635dfd0e6794920aa8921c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 5 Nov 2024 10:56:25 -0800 Subject: [PATCH 074/109] Apply clippy Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ea24d24a5b..c41dd110e9 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5044,7 +5044,7 @@ fn continue_after_fast_block_no_sortition() { let send_fee = 180; let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 5)], + vec![(sender_addr, (send_amt + send_fee) * 5)], ); let timeout = Duration::from_secs(200); let _coord_channel = signer_test.running_nodes.coord_channel.clone(); @@ -5056,7 +5056,7 @@ fn 
continue_after_fast_block_no_sortition() { let sortdb = burnchain.open_sortition_db(true).unwrap(); let get_burn_height = || { - SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height }; @@ -5093,7 +5093,7 @@ fn continue_after_fast_block_no_sortition() { .expect("Timed out waiting for a new block commit"); // Make all signers ignore block proposals - let ignoring_signers: Vec<_> = all_signers.iter().cloned().collect(); + let ignoring_signers = all_signers.to_vec(); TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() @@ -5126,7 +5126,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap(); // assure we have a sortition - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); let burn_height_before = signer_test From d0355cb1a9f6e6024647e15f98585b39ed8c3370 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 5 Nov 2024 12:18:53 -0800 Subject: [PATCH 075/109] Use multiple miners to trigger weird chain stall Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 309 ++++++++++++++++----- 1 file changed, 239 insertions(+), 70 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c41dd110e9..24647910f4 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5030,50 +5030,178 @@ fn continue_after_fast_block_no_sortition() { return; } - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); let send_amt = 100; 
let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( + let num_txs = 5; + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr, (send_amt + send_fee) * 5)], + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = 
Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, ); - let timeout = Duration::from_secs(200); - let _coord_channel = signer_test.running_nodes.coord_channel.clone(); - let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let mut run_loop_2 = 
boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + rl2_skip_commit_op.set(true); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); signer_test.boot_to_epoch_3(); + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for boostrapped node to catch up to the miner"); + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + let burnchain = signer_test.running_nodes.conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); let get_burn_height = || { SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; - let all_signers = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let 
blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1), + ) + .unwrap(); + btc_blocks_mined += 1; - info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + // wait for the new block to be processed + wait_for(60, || { + Ok(blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + ); let stacks_height_before = signer_test .stacks_client @@ -5081,17 +5209,6 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; - // Wait for a new block commit - wait_for(20, || { - let commits = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - // 2 because we mined one block in the normal tenure - Ok(commits - commits_before >= 2) - }) - .expect("Timed out waiting for a new block commit"); - // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -5099,21 +5216,23 @@ fn continue_after_fast_block_no_sortition() { .unwrap() .replace(ignoring_signers.clone()); - // Don't make future block commits + // Make sure miner 1 doesn't submit a block commit for the next tenure signer_test .running_nodes .nakamoto_test_skip_commit_op .set(true); - let burn_height_before = get_burn_height(); - let rejections_before = signer_test .running_nodes .nakamoto_blocks_rejected .load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + // Mine a new burn block - info!("------------------------- Starting Tenure 
B -------------------------"; + let burn_height_before = get_burn_height(); + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; "burn_height_before" => burn_height_before, "rejections_before" => rejections_before, ); @@ -5124,17 +5243,12 @@ fn continue_after_fast_block_no_sortition() { || Ok(get_burn_height() > burn_height_before), ) .unwrap(); + btc_blocks_mined += 1; // assure we have a sortition let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); - let burn_height_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .burn_block_height; - info!("----- Waiting for block rejections -----"); let min_rejections = (num_signers as u64) * 4 / 10; // Wait until we have some block rejections @@ -5148,52 +5262,76 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block rejections"); + // Make miner 2 also fail to submit commits + rl2_skip_commit_op.set(true); // Miner another block and ensure there is _no_ sortition - info!("------------------------- Mine another block -------------------------"); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || { - let burn_height = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .burn_block_height; - Ok(burn_height > burn_height_before) - }, - ) - .unwrap(); + info!("------------------------- Mine Burn Block with No Sortition -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let burn_height_before = get_burn_height(); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + let commits_before_2 = rl2_commits.load(Ordering::SeqCst); - // Verify that no Stacks blocks have been mined (signers are ignoring) + signer_test + .running_nodes + .btc_regtest_controller + 
.build_next_block(1); + btc_blocks_mined += 1; + + wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); + + assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); + assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); + assert_eq!( + blocks_mined1.load(Ordering::SeqCst), + blocks_processed_before_1 + ); + assert_eq!( + blocks_mined2.load(Ordering::SeqCst), + blocks_processed_before_2 + ); + + // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; assert_eq!(stacks_height, stacks_height_before); - let stacks_height_before = stacks_height; + info!( + "------------------------- Miner 2 Attempts to Mine a Tenure C -------------------------" + ); + + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let burn_height_before = get_burn_height(); + let commits_before_2 = rl2_commits.load(Ordering::SeqCst); + info!("----- Enabling signers to approve proposals -----"; "stacks_height" => stacks_height_before, ); - // Allow signers to respond to proposals again TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() .replace(Vec::new()); - wait_for(30, || { - let stacks_height = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .stacks_tip_height; - Ok(stacks_height > stacks_height_before) - }) - .expect("Expected a new Stacks block to be mined"); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + // TODO: can combine the following three wait_for and also the next_block_and once fixed + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(rl2_commits.load(Ordering::SeqCst) > commits_before_2), + ) + .unwrap(); + btc_blocks_mined += 1; + + wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); + + // TODO DELETE THIS let blocks = 
test_observer::get_blocks(); // Debug the last 4 blocks let blocks = blocks.iter().rev().take(4).rev().collect::>(); @@ -5211,6 +5349,37 @@ fn continue_after_fast_block_no_sortition() { } } } + + wait_for(30, || { + Ok(blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2) + }) + .expect("Timed out waiting for block to be mined and processed"); + + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(stacks_height > stacks_height_before) + }) + .expect("Expected a new Stacks block to be mined"); + + let peer_info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); + // We only successfully mine 2 stacks block in this test + assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 2); + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); } #[test] From e459b8cf2f51b05d1e26b4fe612534fe09f4ed0c Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 5 Nov 2024 12:28:04 -0800 Subject: [PATCH 076/109] Update test description Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 24647910f4..bad6bc6db7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5019,10 +5019,13 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } /// Test a scenario where: -/// We have one miner. During block A, there is a sortition and a TenureChange. 
-/// Block B is mined, but it does not contain a TenureChange (ie because a -/// new burn block was mined too quickly). -/// Then block C occurs, which does not have a sortition. +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first Nakamoto tenure A. Miner 1 mines a regular stacks block N. +/// Miner 2 wins the second Nakamoto tenure B and proposes block N+1, but it is rejected by the signers. +/// An empty burn block is mined +/// Miner 2 wins the third Nakamoto tenure C. Miner 2 proposes a block N+1' which all signers accept. +/// Asserts: +/// - The stacks tip advances to N+1' #[test] #[ignore] fn continue_after_fast_block_no_sortition() { From 554c9632ebee76a0c16eedb15b57dff708b128eb Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:26:00 -0800 Subject: [PATCH 077/109] Update the changelogs --- CHANGELOG.md | 15 +++++++++++++++ stacks-signer/CHANGELOG.md | 16 +++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7caac38fe..64a237ba01 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,21 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Add index for StacksBlockId to nakamoto block headers table (improves node performance) - Remove the panic for reporting DB deadlocks (just error and continue waiting) +## [3.0.0.0.1] + +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Various test fixes for CI (5353, 5368, 5372, 5371, 5380, 5378, 5387, 5396, 5390, 5394) +- Various log fixes: + - don't say proceeding to mine blocks if not a miner + - misc. 
warns downgraded to debugs +- 5391: Update default block proposal timeout to 10 minutes +- 5406: After block rejection, miner pauses +- Docs fixes + - Fix signer docs link + - Specify burn block in clarity docs + ## [3.0.0.0.0] ### Added diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 489fd39cf7..3183c0d5c3 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,7 +11,21 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -## [3.0.0.0.0] +## [3.0.0.0.1.0] + +### Changed + +- Change block rejection message to generic block response + +## [3.0.0.0.0.1] + +### Added + +### Changed +- Update block proposal timeout default to 10 minutes (#5391) +- Updated documentation link in output (#5363) + +## [3.0.0.0.0.0] ### Added From 77d4075b7c6d6c41f17d6527c74c7400ec807835 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 5 Nov 2024 16:40:46 -0500 Subject: [PATCH 078/109] test: attempt to increase `pox_sync_sample_secs` --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8ed453efe1..ba2f067928 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3723,7 +3723,7 @@ fn follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 30; + naka_conf.node.pox_sync_sample_secs = 180; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); From 45adc33f8bf40a643a756cbd5d6ffbe4859dd50b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 5 Nov 2024 17:03:21 -0500 Subject: [PATCH 079/109] chore: address remaining PR feedback and get tests 
to pass --- stackslib/src/burnchains/burnchain.rs | 18 ++- stackslib/src/burnchains/db.rs | 55 ++++---- .../nakamoto/download_state_machine.rs | 21 ++-- .../nakamoto/tenure_downloader_set.rs | 117 ++++++++++-------- stackslib/src/net/mod.rs | 61 ++------- stackslib/src/net/p2p.rs | 4 +- 6 files changed, 129 insertions(+), 147 deletions(-) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index a5ecaa0458..c1d07994d7 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -702,6 +702,10 @@ impl Burnchain { } pub fn get_burnchaindb_path(&self) -> String { + if self.working_dir.as_str() == ":memory:" { + return ":memory:".to_string(); + } + let chainstate_dir = Burnchain::get_chainstate_path_str(&self.working_dir); let mut db_pathbuf = PathBuf::from(&chainstate_dir); db_pathbuf.push("burnchain.sqlite"); @@ -743,12 +747,14 @@ impl Burnchain { /// Open just the burnchain database pub fn open_burnchain_db(&self, readwrite: bool) -> Result { let burnchain_db_path = self.get_burnchaindb_path(); - if let Err(e) = fs::metadata(&burnchain_db_path) { - warn!( - "Failed to stat burnchain DB path '{}': {:?}", - &burnchain_db_path, &e - ); - return Err(burnchain_error::DBError(db_error::NoDBError)); + if burnchain_db_path != ":memory:" { + if let Err(e) = fs::metadata(&burnchain_db_path) { + warn!( + "Failed to stat burnchain DB path '{}': {:?}", + &burnchain_db_path, &e + ); + return Err(burnchain_error::DBError(db_error::NoDBError)); + } } test_debug!( "Open burnchain DB at {} (rw? 
{})", diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 72ca2e8bf1..d5f1e18804 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1000,33 +1000,38 @@ impl BurnchainDB { readwrite: bool, ) -> Result { let mut create_flag = false; - let open_flags = match fs::metadata(path) { - Err(e) => { - if e.kind() == io::ErrorKind::NotFound { - // need to create - if readwrite { - create_flag = true; - let ppath = Path::new(path); - let pparent_path = ppath - .parent() - .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); - fs::create_dir_all(&pparent_path) - .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; - - OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + let open_flags = if path == ":memory:" { + create_flag = true; + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + match fs::metadata(path) { + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + // need to create + if readwrite { + create_flag = true; + let ppath = Path::new(path); + let pparent_path = ppath + .parent() + .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); + fs::create_dir_all(&pparent_path) + .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; + + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + return Err(BurnchainError::from(DBError::NoDBError)); + } } else { - return Err(BurnchainError::from(DBError::NoDBError)); + return Err(BurnchainError::from(DBError::IOError(e))); } - } else { - return Err(BurnchainError::from(DBError::IOError(e))); } - } - Ok(_md) => { - // can just open - if readwrite { - OpenFlags::SQLITE_OPEN_READ_WRITE - } else { - OpenFlags::SQLITE_OPEN_READ_ONLY + Ok(_md) => { + // can just open + if readwrite { + OpenFlags::SQLITE_OPEN_READ_WRITE + } else { + OpenFlags::SQLITE_OPEN_READ_ONLY + } } } }; @@ -1089,7 +1094,7 @@ impl BurnchainDB { let conn = sqlite_open(path, open_flags, true)?; let mut db = 
BurnchainDB { conn }; - if readwrite { + if readwrite || path == ":memory:" { db.add_indexes()?; } Ok(db) diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index c2b66beab1..42d228aca1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1418,16 +1418,19 @@ impl NakamotoDownloadStateMachine { chainstate: &StacksChainState, ibd: bool, ) -> HashMap> { - debug!("NakamotoDownloadStateMachine in state {}", &self.state); + debug!( + "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", + burnchain_height, network.burnchain_tip.block_height, &self.state; + "has_network_inventories" => network.inv_state_nakamoto.is_some(), + "next_unconfirmed_check" => self.last_unconfirmed_download_check_ms.saturating_add(CHECK_UNCONFIRMED_TENURES_MS) / 1000, + "timestamp_ms" => get_epoch_time_ms(), + ); + let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - debug!("No network inventories"); return HashMap::new(); }; - debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", - burnchain_height, network.burnchain_tip.block_height, &self.state - ); + self.update_available_tenures( &invs.inventories, &sortdb.pox_constants, @@ -1441,12 +1444,6 @@ impl NakamotoDownloadStateMachine { .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) > get_epoch_time_ms() { - debug!( - "Throttle checking for unconfirmed tenures until {}", - self.last_unconfirmed_download_check_ms - .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) - / 1000 - ); false } else { let do_fetch = Self::need_unconfirmed_tenures( diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 971bf94db8..b5514558b8 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs 
+++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -156,12 +156,23 @@ impl NakamotoTenureDownloaderSet { ); } + /// Mark a peer and its tenure as dead and failed + fn mark_failed_and_deprioritize_peer( + attempted_failed_tenures: &mut HashMap, + deprioritized_peers: &mut HashMap, + ch: &ConsensusHash, + peer: &NeighborAddress, + ) { + Self::mark_failure(attempted_failed_tenures, ch); + Self::mark_deprioritized(deprioritized_peers, peer); + } + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr + "Add downloader for tenure {} driven by {naddr}", + &downloader.tenure_id_consensus_hash ); if let Some(idx) = self.peers.get(&naddr) { self.downloaders[*idx] = Some(downloader); @@ -215,7 +226,7 @@ impl NakamotoTenureDownloaderSet { ) { for (naddr, downloader) in iter { if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); + debug!("Already have downloader for {naddr}"); continue; } self.add_downloader(naddr, downloader); @@ -270,8 +281,8 @@ impl NakamotoTenureDownloaderSet { }; debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash + "Peer {naddr} already bound to downloader for {}", + &_downloader.tenure_id_consensus_hash ); return true; } @@ -283,8 +294,8 @@ impl NakamotoTenureDownloaderSet { continue; } debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Assign peer {naddr} to work on downloader for {} in state {}", + &downloader.tenure_id_consensus_hash, &downloader.state ); downloader.naddr = naddr.clone(); self.peers.insert(naddr, i); @@ -304,14 +315,14 @@ impl NakamotoTenureDownloaderSet { continue; }; let Some(downloader) = 
downloader_opt.as_ref() else { - debug!("Remove peer {} for null download {}", &naddr, i); + debug!("Remove peer {naddr} for null download {i}"); idled.push(naddr.clone()); continue; }; if downloader.idle { debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash + "Remove idled peer {naddr} for tenure download {}", + &downloader.tenure_id_consensus_hash ); idled.push(naddr.clone()); } @@ -325,6 +336,7 @@ impl NakamotoTenureDownloaderSet { /// this up with a call to `clear_available_peers()`. pub fn clear_finished_downloaders(&mut self) { for downloader_opt in self.downloaders.iter_mut() { + // clear the downloader if it's done by setting it to None if downloader_opt .as_ref() .map(|dl| dl.is_done()) @@ -359,8 +371,8 @@ impl NakamotoTenureDownloaderSet { }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, state={}, naddr={})", - tenure_id, downloader.idle, &downloader.state, &downloader.naddr + "Have downloader for tenure {tenure_id} already (idle={}, state={}, naddr={})", + downloader.idle, &downloader.state, &downloader.naddr ); return true; } @@ -396,25 +408,24 @@ impl NakamotoTenureDownloaderSet { }; let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); + debug!("No neighbors have tenure {ch}"); schedule.pop_front(); continue; }; if neighbors.is_empty() { // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; } let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; }; if get_epoch_time_secs() < *self.deprioritized_peers.get(&naddr).unwrap_or(&0) { debug!( - "Peer {} is deprioritized until {}", - &naddr, + "Peer {} is 
deprioritized until {naddr}", self.deprioritized_peers.get(&naddr).unwrap_or(&0) ); continue; @@ -430,18 +441,18 @@ impl NakamotoTenureDownloaderSet { let Some(available_tenures) = tenure_block_ids.get(&naddr) else { // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); + debug!("No tenures available from {naddr}"); continue; }; let Some(tenure_info) = available_tenures.get(ch) else { // this peer does not have a tenure start/end block for this tenure, so try the // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + debug!("Neighbor {naddr} does not serve tenure {ch}"); continue; }; if tenure_info.processed { // we already have this tenure - debug!("Already have processed tenure {}", ch); + debug!("Already have processed tenure {ch}"); self.completed_tenures .remove(&CompletedTenure::from(tenure_info)); continue; @@ -451,8 +462,8 @@ impl NakamotoTenureDownloaderSet { .contains(&CompletedTenure::from(tenure_info)) { debug!( - "Already successfully downloaded tenure {} ({}-{})", - &ch, &tenure_info.start_block_id, &tenure_info.end_block_id + "Already successfully downloaded tenure {ch} ({}-{})", + &tenure_info.start_block_id, &tenure_info.end_block_id ); schedule.pop_front(); continue; @@ -462,9 +473,8 @@ impl NakamotoTenureDownloaderSet { .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {tenure_info:?}", tenure_info.start_reward_cycle, - &tenure_info ); schedule.pop_front(); continue; @@ -474,9 +484,8 @@ impl NakamotoTenureDownloaderSet { .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {tenure_info:?}", tenure_info.end_reward_cycle, - 
&tenure_info ); schedule.pop_front(); continue; @@ -488,7 +497,7 @@ impl NakamotoTenureDownloaderSet { let attempt_failed_count = *self.attempt_failed_tenures.get(&ch).unwrap_or(&0); - info!("Download tenure {}", &ch; + info!("Download tenure {ch}"; "peer" => %naddr, "attempt" => attempt_count.saturating_add(1), "failed" => attempt_failed_count, @@ -511,7 +520,7 @@ impl NakamotoTenureDownloaderSet { end_reward_set.clone(), ); - debug!("Request tenure {} from neighbor {}", ch, &naddr); + debug!("Request tenure {ch} from neighbor {naddr}"); self.add_downloader(naddr, tenure_download); schedule.pop_front(); } @@ -540,36 +549,37 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); + debug!("Peer {naddr} has an inflight request"); continue; } let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; if downloader.is_done() { debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for {naddr} on tenure {} is finished", + &downloader.tenure_id_consensus_hash ); finished.push(naddr.clone()); finished_tenures.push(CompletedTenure::from(downloader)); continue; } debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Send request to {naddr} for tenure {} (state {})", + &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { info!( - "Downloader for tenure {} to {} failed; this peer is dead", - &downloader.tenure_id_consensus_hash, &naddr + "Downloader for tenure {} to {naddr} failed; this peer is dead", + &downloader.tenure_id_consensus_hash, ); - Self::mark_failure( + Self::mark_failed_and_deprioritize_peer( &mut 
self.attempt_failed_tenures, + &mut self.deprioritized_peers, &downloader.tenure_id_consensus_hash, + naddr, ); - Self::mark_deprioritized(&mut self.deprioritized_peers, &naddr); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -583,12 +593,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(&naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { @@ -598,34 +608,35 @@ impl NakamotoTenureDownloaderSet { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; - debug!("Got response from {}", &naddr); + debug!("Got response from {naddr}"); let Ok(blocks_opt) = downloader .handle_next_download_response(response) .map_err(|e| { info!( - "Failed to handle response from {} on tenure {}: {:?}", - &naddr, &downloader.tenure_id_consensus_hash, &e + "Failed to handle response from {naddr} on tenure {}: {e}", + &downloader.tenure_id_consensus_hash, ); e }) else { debug!( - "Failed to handle download response from {} on tenure {}", - &naddr, &downloader.tenure_id_consensus_hash + "Failed to handle download response from {naddr} on tenure {}", + &downloader.tenure_id_consensus_hash ); - Self::mark_failure( + Self::mark_failed_and_deprioritize_peer( &mut self.attempt_failed_tenures, + &mut self.deprioritized_peers, 
&downloader.tenure_id_consensus_hash, + &naddr, ); - Self::mark_deprioritized(&mut self.deprioritized_peers, &naddr); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -646,8 +657,8 @@ impl NakamotoTenureDownloaderSet { &downloader.tenure_id_consensus_hash ); debug!( - "Downloader for tenure {} finished on {}", - &downloader.tenure_id_consensus_hash, &naddr + "Downloader for tenure {} finished on {naddr}", + &downloader.tenure_id_consensus_hash, ); finished.push(naddr.clone()); finished_tenures.push(CompletedTenure::from(downloader)); @@ -658,12 +669,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) 
{ diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 86f3d8578f..9e17c3f428 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1585,16 +1585,10 @@ impl NetworkResult { }) .collect::>() }) - .collect::>>() + .flatten() }) - .collect::>>>() - .into_iter() .flatten() - .into_iter() - .fold(HashSet::new(), |mut acc, next| { - acc.extend(next.into_iter()); - acc - }); + .collect(); let uploaded_blocks: HashSet<_> = self .uploaded_blocks @@ -1604,14 +1598,9 @@ impl NetworkResult { .blocks .iter() .map(|blk| StacksBlockId::new(&blk.0, &blk.1.block_hash())) - .collect::>() }) - .collect::>>() - .into_iter() - .fold(HashSet::new(), |mut acc, next| { - acc.extend(next.into_iter()); - acc - }); + .flatten() + .collect(); blocks.extend(pushed_blocks.into_iter()); blocks.extend(uploaded_blocks.into_iter()); @@ -1623,18 +1612,9 @@ impl NetworkResult { let mut mblocks: HashSet<_> = self .confirmed_microblocks .iter() - .map(|(_, mblocks, _)| { - mblocks - .iter() - .map(|mblk| mblk.block_hash()) - .collect::>() - }) - .collect::>>() - .into_iter() - .fold(HashSet::new(), |mut acc, next| { - acc.extend(next.into_iter()); - acc - }); + .map(|(_, mblocks, _)| mblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + .collect(); let pushed_microblocks: HashSet<_> = self .pushed_microblocks @@ -1647,35 +1627,18 @@ impl NetworkResult { .microblocks .iter() .map(|mblock| mblock.block_hash()) - .collect::>() }) - .collect::>>() + .flatten() }) - .collect::>>>() - .into_iter() .flatten() - .into_iter() - .fold(HashSet::new(), |mut acc, next| { - acc.extend(next.into_iter()); - acc - }); + .collect(); let uploaded_microblocks: HashSet<_> = self .uploaded_microblocks .iter() - .map(|mblk_data| { - mblk_data - .microblocks - .iter() - .map(|mblk| mblk.block_hash()) - .collect::>() - }) - .collect::>>() - .into_iter() - .fold(HashSet::new(), |mut acc, next| { - acc.extend(next.into_iter()); - acc - }); + .map(|mblk_data| 
mblk_data.microblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + .collect(); mblocks.extend(pushed_microblocks.into_iter()); mblocks.extend(uploaded_microblocks.into_iter()); diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 46821069cb..d77c0df9fa 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -5284,7 +5284,7 @@ mod test { network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), + working_dir: ":memory:".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, initial_reward_start_block: 50, @@ -5315,7 +5315,7 @@ mod test { let atlas_config = AtlasConfig::new(false); let atlasdb = AtlasDB::connect_memory(atlas_config).unwrap(); let stacker_db = StackerDBs::connect_memory(); - let burnchain_db = burnchain.open_burnchain_db(false).unwrap(); + let burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); let local_peer = PeerDB::get_local_peer(db.conn()).unwrap(); let p2p = PeerNetwork::new( From 25cd9d986d942d0ce0688ade209d204246984c6d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 5 Nov 2024 14:22:26 -0800 Subject: [PATCH 080/109] Update test to be more streamlined and ensure no extra commits make it in Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 164 ++++++++++++--------- 1 file changed, 98 insertions(+), 66 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index bad6bc6db7..80aca7291e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5023,7 +5023,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { /// Miner 1 wins the first Nakamoto tenure A. Miner 1 mines a regular stacks block N. /// Miner 2 wins the second Nakamoto tenure B and proposes block N+1, but it is rejected by the signers. 
/// An empty burn block is mined -/// Miner 2 wins the third Nakamoto tenure C. Miner 2 proposes a block N+1' which all signers accept. +/// TODO: which behaviour do we want to enforce? Should we allow both? If so, we should force both explicitly +/// Miner 1 should issue a tenure extend and propose block N+1' which is accepted by the signers OR +/// Miner 2 should issue a new TenureChangePayload followed by a TenureExtend. /// Asserts: /// - The stacks tip advances to N+1' #[test] @@ -5034,11 +5036,13 @@ fn continue_after_fast_block_no_sortition() { } let num_signers = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let num_txs = 5; + let num_txs = 1; + let sender_nonce = 0; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -5129,6 +5133,7 @@ fn continue_after_fast_block_no_sortition() { conf.burnchain.chain_id, conf.burnchain.peer_version, ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let run_loop_stopper_2 = run_loop_2.get_termination_switch(); @@ -5145,6 +5150,7 @@ fn continue_after_fast_block_no_sortition() { info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + // Make sure Miner 2 cannot win a sortition at first. 
rl2_skip_commit_op.set(true); let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) @@ -5185,16 +5191,23 @@ fn continue_after_fast_block_no_sortition() { info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); - let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit a block commit for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1), - ) - .unwrap(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); btc_blocks_mined += 1; + // assure we have a sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + // wait for the new block to be processed wait_for(60, || { Ok(blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1) @@ -5212,6 +5225,7 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; + info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -5219,21 +5233,27 @@ fn continue_after_fast_block_no_sortition() { .unwrap() .replace(ignoring_signers.clone()); - // Make sure miner 1 doesn't submit a block commit for the next tenure - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .set(true); - + info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); let rejections_before = signer_test .running_nodes .nakamoto_blocks_rejected 
.load(Ordering::SeqCst); + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); // Unpause miner 2's block commits rl2_skip_commit_op.set(false); - // Mine a new burn block + info!("------------------------- Wait for Miner 2's Block Commit Submission -------------------------"); + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + rl2_skip_commit_op.set(true); + let burn_height_before = get_burn_height(); info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; "burn_height_before" => burn_height_before, @@ -5253,20 +5273,29 @@ fn continue_after_fast_block_no_sortition() { assert!(tip.sortition); info!("----- Waiting for block rejections -----"); - let min_rejections = (num_signers as u64) * 4 / 10; + let min_rejections = num_signers * 4 / 10; // Wait until we have some block rejections wait_for(30, || { - let rejections = signer_test - .running_nodes - .nakamoto_blocks_rejected - .load(Ordering::SeqCst); - let rejections_diff = rejections - rejections_before; - Ok(rejections_diff >= min_rejections) + std::thread::sleep(Duration::from_secs(1)); + let chunks = test_observer::get_stackerdb_chunks(); + let rejections: Vec<_> = chunks + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return false; + }; + matches!( + message, + SignerMessage::BlockResponse(BlockResponse::Rejected(_)) + ) + }) + .collect(); + Ok(rejections.len() >= min_rejections) }) .expect("Timed out waiting for block rejections"); - // Make miner 2 also fail to submit commits - rl2_skip_commit_op.set(true); // Miner another block and ensure there is _no_ 
sortition info!("------------------------- Mine Burn Block with No Sortition -------------------------"); let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); @@ -5294,6 +5323,10 @@ fn continue_after_fast_block_no_sortition() { blocks_processed_before_2 ); + // assure we have NO sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(!tip.sortition); + // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner let stacks_height = signer_test .stacks_client @@ -5303,56 +5336,32 @@ fn continue_after_fast_block_no_sortition() { assert_eq!(stacks_height, stacks_height_before); let stacks_height_before = stacks_height; - info!( - "------------------------- Miner 2 Attempts to Mine a Tenure C -------------------------" - ); - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - let burn_height_before = get_burn_height(); - let commits_before_2 = rl2_commits.load(Ordering::SeqCst); - info!("----- Enabling signers to approve proposals -----"; "stacks_height" => stacks_height_before, ); + + let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() .replace(Vec::new()); - // Unpause miner 2's block commits - rl2_skip_commit_op.set(false); - - // TODO: can combine the following three wait_for and also the next_block_and once fixed - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 60, - || Ok(rl2_commits.load(Ordering::SeqCst) > commits_before_2), - ) - .unwrap(); - btc_blocks_mined += 1; - - wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); - - // TODO DELETE THIS - let blocks = test_observer::get_blocks(); - // Debug the last 4 blocks - let blocks = blocks.iter().rev().take(4).rev().collect::>(); - for block in blocks { - println!("\n\n"); - info!("Block: {}", 
serde_json::to_string_pretty(&block).unwrap()); - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter().rev() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx != "0x00" { - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = - StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); - info!("Tx: {}", serde_json::to_string_pretty(&parsed).unwrap()); - } - } - } + // submit a tx so that the miner will mine an extra block just in case due to timing constraints, the first block with the tenure extend was + // rejected already by the signers + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + // TODO: combine these wait fors once fixed code + // wait for the new block to be processed wait_for(30, || { Ok(blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2) }) @@ -5368,14 +5377,37 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Expected a new Stacks block to be mined"); + wait_for( + 30, + || Ok(test_observer::get_blocks().len() > nmb_old_blocks), + ) + .expect("Timed out waiting for test observer to see new block"); + + let blocks = test_observer::get_blocks(); + let tenure_extend_block = if nmb_old_blocks + 1 == test_observer::get_blocks().len() { + blocks.last().unwrap() + } else { + &blocks[blocks.len() - 2] + }; + let transactions = tenure_extend_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => {} + _ => 
panic!("Expected tenure extend transaction, got {parsed:?}"), + }; + let peer_info = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); - // We only successfully mine 2 stacks block in this test - assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 2); + // We successfully mine at least 2 stacks block in this test + assert!(peer_info.stacks_tip_height >= starting_peer_height + 2); rl2_coord_channels .lock() .expect("Mutex poisoned") From c80fa544d176d85a1741b958bc3e11f6c891bf97 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 5 Nov 2024 15:32:55 -0800 Subject: [PATCH 081/109] CRC: fix typo in test descriptions and fix imports Signed-off-by: Jacinta Ferrant --- stacks-signer/src/chainstate.rs | 5 ++--- stacks-signer/src/signerdb.rs | 2 +- stackslib/src/chainstate/stacks/mod.rs | 3 +-- testnet/stacks-node/src/tests/signer/v0.rs | 4 ++-- 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 4159e2fe83..fa24c8b22e 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -19,10 +19,9 @@ use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::BurnchainHeaderHash; -use clarity::util::get_epoch_time_secs; use slog::{slog_info, slog_warn}; -use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash, StacksPublicKey}; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 06b9d703c3..1d2e650207 100644 --- 
a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -24,7 +24,6 @@ use blockstack_lib::util_lib::db::{ Error as DBError, }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; -use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, @@ -33,6 +32,7 @@ use serde::{Deserialize, Serialize}; use slog::{slog_debug, slog_error}; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 8af9cf6ec7..fd370a8b12 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1101,13 +1101,12 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { - use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; - use stacks_common::util::log; use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use stacks_common::util::{get_epoch_time_secs, log}; use super::*; use crate::chainstate::burn::BlockSnapshot; diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 6e97112ffb..7d373a014f 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4501,7 +4501,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { #[test] #[ignore] -/// Test that signers that have accept a locally signed block N+1 built in tenure A can sign a block proposed 
during a +/// Test that signers that have accepedt a locally signed block N+1 built in tenure A can sign a block proposed during a /// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. /// /// Test Setup: @@ -4725,7 +4725,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { #[test] #[ignore] -/// Test that signers that have accept a locally signed block N+1 built in tenure A cannot sign a block proposed during a +/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a /// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary /// before the specified timeout has been exceeded. /// From ee2ef2dac54a97036c8f3952b796124d5195e876 Mon Sep 17 00:00:00 2001 From: janniks Date: Wed, 6 Nov 2024 16:47:38 +0100 Subject: [PATCH 082/109] test: fix too long txids --- .../src/chainstate/stacks/db/transactions.rs | 21 +++---------------- 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 456f544645..e9de9139a2 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -6876,12 +6876,7 @@ pub mod test { mode, origin, &ft_transfer_2, - Txid( - "1232121232121232121232121232121232121232121232121232121232121232" - .as_bytes() - .try_into() - .unwrap(), - ), + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -7235,12 +7230,7 @@ pub mod test { mode, origin, &nft_transfer_2, - Txid( - "1232121232121232121232121232121232121232121232121232121232121232" - .as_bytes() - .try_into() - .unwrap(), - ), + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -8058,12 +8048,7 @@ pub mod test { post_condition_mode, origin_account, asset_map, - Txid( - 
"1232121232121232121232121232121232121232121232121232121232121232" - .as_bytes() - .try_into() - .unwrap(), - ), + Txid([0; 32]), ) .unwrap(); if result != expected_result { From d9485b6ad15a61d4feacc12f065ee7dc81f73ec4 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 07:56:09 -0800 Subject: [PATCH 083/109] CRC: typo in comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7d373a014f..6cf2d29fba 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -4501,7 +4501,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { #[test] #[ignore] -/// Test that signers that have accepedt a locally signed block N+1 built in tenure A can sign a block proposed during a +/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a /// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. 
/// /// Test Setup: From cf1d13cd59d5713c30b14a35c1cd2ffe029ef75d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 15:22:45 -0800 Subject: [PATCH 084/109] Add fast blocks test to CI Signed-off-by: Jacinta Ferrant --- .github/workflows/bitcoin-tests.yml | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 54 ++++++++++++++++------ 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..f8b3fb7547 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -120,6 +120,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::multiple_miners_with_custom_chain_id + - tests::signer::v0::continue_after_fast_block_no_sortition - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 80aca7291e..529d8a26e1 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -5023,11 +5023,14 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { /// Miner 1 wins the first Nakamoto tenure A. 
Miner 1 mines a regular stacks block N. /// Miner 2 wins the second Nakamoto tenure B and proposes block N+1, but it is rejected by the signers. /// An empty burn block is mined -/// TODO: which behaviour do we want to enforce? Should we allow both? If so, we should force both explicitly -/// Miner 1 should issue a tenure extend and propose block N+1' which is accepted by the signers OR -/// Miner 2 should issue a new TenureChangePayload followed by a TenureExtend. +/// Miner 2 issue a new TenureChangePayload in block N+1' +/// Signers accept the new TenureChangePayload and the stacks tip should advance to N+1' +/// Miner 2 issue a TenureExtend in block proposal N+2' +/// Signers accept the TenureExtend and the stacks tip should advance to N+2' /// Asserts: -/// - The stacks tip advances to N+1' +/// - Block N+1' contains the TenureChangePayload +/// - Block N+2 contains the TenureExtend +/// - The stacks tip advances to N+2' #[test] #[ignore] fn continue_after_fast_block_no_sortition() { @@ -5152,6 +5155,9 @@ fn continue_after_fast_block_no_sortition() { // Make sure Miner 2 cannot win a sortition at first. 
rl2_skip_commit_op.set(true); + + info!("------------------------- Boot to Epoch 3.0 -------------------------"); + let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -5170,6 +5176,11 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(&conf.miner.mining_key.unwrap())); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap())); + debug!("The miner key for miner 1 is {mining_pkh_1}"); + debug!("The miner key for miner 2 is {mining_pkh_2}"); + info!("------------------------- Reached Epoch 3.0 -------------------------"); let burnchain = signer_test.running_nodes.conf.get_burnchain(); @@ -5189,15 +5200,22 @@ fn continue_after_fast_block_no_sortition() { let starting_burn_height = get_burn_height(); let mut btc_blocks_mined = 0; - info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); - let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); info!("------------------------- Pause Miner 1's Block Commit -------------------------"); - // Make sure miner 1 doesn't submit a block commit for the next tenure BEFORE mining the bitcoin block + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block signer_test .running_nodes .nakamoto_test_skip_commit_op .set(true); + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + signer_test .running_nodes .btc_regtest_controller @@ -5210,7 +5228,12 @@ fn 
continue_after_fast_block_no_sortition() { // wait for the new block to be processed wait_for(60, || { - Ok(blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1) + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 && stacks_height > stacks_height_before) }) .unwrap(); @@ -5219,13 +5242,14 @@ fn continue_after_fast_block_no_sortition() { blocks_mined1.load(Ordering::SeqCst) ); + info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); + let stacks_height_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); // Make all signers ignore block proposals let ignoring_signers = all_signers.to_vec(); TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -5233,7 +5257,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap() .replace(ignoring_signers.clone()); - info!("------------------------- Unpause Miner 2's Block Commits -------------------------"); + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); let rejections_before = signer_test .running_nodes .nakamoto_blocks_rejected @@ -5243,7 +5267,6 @@ fn continue_after_fast_block_no_sortition() { // Unpause miner 2's block commits rl2_skip_commit_op.set(false); - info!("------------------------- Wait for Miner 2's Block Commit Submission -------------------------"); // Ensure the miner 2 submits a block commit before mining the bitcoin block wait_for(30, || { Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) @@ -5337,7 +5360,7 @@ fn continue_after_fast_block_no_sortition() { let stacks_height_before = stacks_height; let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - info!("----- Enabling signers to 
approve proposals -----"; + info!("------------------------- Enabling signers to approve proposals -------------------------"; "stacks_height" => stacks_height_before, ); @@ -5348,6 +5371,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap() .replace(Vec::new()); + info!("------------------------- Mining Interim Block -------------------------"); // submit a tx so that the miner will mine an extra block just in case due to timing constraints, the first block with the tenure extend was // rejected already by the signers let transfer_tx = make_stacks_transfer( @@ -5383,6 +5407,10 @@ fn continue_after_fast_block_no_sortition() { ) .expect("Timed out waiting for test observer to see new block"); + info!( + "------------------------- Verify Tenure Extend Tx from Miner B -------------------------" + ); + let blocks = test_observer::get_blocks(); let tenure_extend_block = if nmb_old_blocks + 1 == test_observer::get_blocks().len() { blocks.last().unwrap() From 80fbce7bd480829d6c88895bf1f312f05fb8c156 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 15:51:33 -0800 Subject: [PATCH 085/109] BUGFIX: use the block election snapshot when creating a tenure extend. 
Not the tip consensus hash as it may not match and will result in a bad signer slot generation when writing block proposal Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 7c2ab155d3..c90ebc60c7 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -880,23 +880,14 @@ impl RelayerThread { SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - let block_election_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; let Some(ref mining_key) = self.config.miner.mining_key else { return Ok(()); }; let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - let last_winner_snapshot = { + // If we won the last sortition, then we should start a new tenure off of it. + let block_election_snapshot = { let ih = self.sortdb.index_handle(&burn_tip.sortition_id); ih.get_last_snapshot_with_sortition(burn_tip.block_height) .map_err(|e| { @@ -905,15 +896,14 @@ impl RelayerThread { })? }; - let won_last_sortition = last_winner_snapshot.miner_pk_hash == Some(mining_pkh); + let won_last_sortition = block_election_snapshot.miner_pk_hash == Some(mining_pkh); debug!( "Relayer: Current burn block had no sortition. 
Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, + "block_election_snapshot.miner_pk_hash" => ?block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "block_election_ch" => %block_election_snapshot.consensus_hash, "burn_view_ch" => %new_burn_view, ); From b9438fa2fd3814b27db46861fd0841ba11a76ec3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 15:54:47 -0800 Subject: [PATCH 086/109] Cargo fmt Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 529d8a26e1..3b4d095d2e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5176,8 +5176,12 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); - let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(&conf.miner.mining_key.unwrap())); - let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap())); + let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf.miner.mining_key.unwrap(), + )); + let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( + &conf_node_2.miner.mining_key.unwrap(), + )); debug!("The miner key for miner 1 is {mining_pkh_1}"); debug!("The miner key for miner 2 is {mining_pkh_2}"); @@ -5233,7 +5237,10 @@ fn continue_after_fast_block_no_sortition() { .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - Ok(blocks_mined1.load(Ordering::SeqCst) > 
blocks_processed_before_1 && stacks_height > stacks_height_before) + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before, + ) }) .unwrap(); From 514c0d02b7065180d9181503242d4dfd1d8d2fe0 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 18:18:31 -0800 Subject: [PATCH 087/109] Fix VRF proof calculation and update test to be more expansive Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/miner.rs | 20 +- .../stacks-node/src/nakamoto_node/relayer.rs | 39 ++- testnet/stacks-node/src/tests/signer/v0.rs | 285 +++++++++++++----- 3 files changed, 257 insertions(+), 87 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 042df70be1..95fbb9aebd 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -109,6 +109,8 @@ pub enum MinerReason { /// sortition. burn_view_consensus_hash: ConsensusHash, }, + /// The miner thread was spawned to initialize a prior empty tenure + EmptyTenure, } impl std::fmt::Display for MinerReason { @@ -121,6 +123,7 @@ impl std::fmt::Display for MinerReason { f, "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}", ), + MinerReason::EmptyTenure => write!(f, "EmptyTenure"), } } } @@ -919,22 +922,25 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key + let sortition_hash = if matches!(self.reason, MinerReason::EmptyTenure) { + self.burn_election_block.sortition_hash + } else { + self.burn_block.sortition_hash + }; let vrf_proof = if self.config.get_node_config(false).mock_mining { - self.keychain.generate_proof( - VRF_MOCK_MINER_KEY, - self.burn_block.sortition_hash.as_bytes(), - ) + self.keychain + .generate_proof(VRF_MOCK_MINER_KEY, sortition_hash.as_bytes()) } else { 
self.keychain.generate_proof( self.registered_key.target_block_height, - self.burn_block.sortition_hash.as_bytes(), + sortition_hash.as_bytes(), ) }; debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), - &self.burn_block.sortition_hash, + &sortition_hash, &self.burn_block.block_height, &self.burn_block.burn_header_hash, &self.registered_key.vrf_public_key.to_hex() @@ -1155,7 +1161,7 @@ impl BlockMinerThread { }; let (tenure_change_tx, coinbase_tx) = match &self.reason { - MinerReason::BlockFound => { + MinerReason::BlockFound | MinerReason::EmptyTenure => { let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; let coinbase_tx = diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index c90ebc60c7..6867b81a96 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -901,6 +901,7 @@ impl RelayerThread { "Relayer: Current burn block had no sortition. Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, + "block_election_snapshot.consensus_hash" => %block_election_snapshot.consensus_hash, "block_election_snapshot.miner_pk_hash" => ?block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, @@ -911,13 +912,45 @@ impl RelayerThread { return Ok(()); } + let tip_info = + NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) + .map_err(|e| { + error!("Relayer: failed to get canonical block header: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? 
+ .ok_or_else(|| { + error!("Relayer: failed to get canonical block header"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let last_non_empty_sortition_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &tip_info.consensus_hash) + .map_err(|e| { + error!("Relayer: failed to get last non-empty sortition snapshot: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get last non-empty sortition snapshot"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let won_last_non_empty_sortition_snapshot = + last_non_empty_sortition_snapshot.miner_pk_hash == Some(mining_pkh); + + let reason = if !won_last_non_empty_sortition_snapshot { + debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a tenure change payload again."); + MinerReason::EmptyTenure + } else { + debug!("Relayer: Successfully issued a tenure change payload in our last tenure. Issue a continue extend."); + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + } + }; match self.start_new_tenure( canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip block_election_snapshot, burn_tip, - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - }, + reason, ) { Ok(()) => { debug!("Relayer: successfully started new tenure."); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3b4d095d2e..e67d95853a 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5020,17 +5020,25 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { /// Test a scenario where: /// Two miners boot to Nakamoto. -/// Miner 1 wins the first Nakamoto tenure A. Miner 1 mines a regular stacks block N. -/// Miner 2 wins the second Nakamoto tenure B and proposes block N+1, but it is rejected by the signers. 
-/// An empty burn block is mined -/// Miner 2 issue a new TenureChangePayload in block N+1' -/// Signers accept the new TenureChangePayload and the stacks tip should advance to N+1' -/// Miner 2 issue a TenureExtend in block proposal N+2' -/// Signers accept the TenureExtend and the stacks tip should advance to N+2' +/// Miner 1 wins the first tenure and proposes a block N with a TenureChangePayload +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B but its proposed blocks are rejected by the signers. +/// Mine 2 empty burn blocks (simulate fast blocks scenario) +/// Miner 2 proposes block N+1 with a TenureChangePayload +/// Signers accept and the stacks tip advances to N+1 +/// Miner 2 proposes block N+2 with a TokenTransfer +/// Signers accept and the stacks tip advances to N+2 +/// Mine an empty burn block +/// Miner 2 proposes block N+3 with a TenureExtend +/// Signers accept and the chain advances to N+3 +/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload +/// Signers accept and the chain advances to N+4 /// Asserts: -/// - Block N+1' contains the TenureChangePayload -/// - Block N+2 contains the TenureExtend -/// - The stacks tip advances to N+2' +/// - Block N+1 contains the TenureChangePayload +/// - Block N+2 contains the TokenTransfer +/// - Block N+3 contains the TenureExtend +/// - Block N+4 contains the TenureChangePayload +/// - The stacks tip advances to N+4 #[test] #[ignore] fn continue_after_fast_block_no_sortition() { @@ -5182,8 +5190,8 @@ fn continue_after_fast_block_no_sortition() { let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private( &conf_node_2.miner.mining_key.unwrap(), )); - debug!("The miner key for miner 1 is {mining_pkh_1}"); - debug!("The miner key for miner 2 is {mining_pkh_2}"); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); info!("------------------------- Reached 
Epoch 3.0 -------------------------"); @@ -5213,7 +5221,7 @@ fn continue_after_fast_block_no_sortition() { info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); - + let nmb_old_blocks = test_observer::get_blocks().len(); let stacks_height_before = signer_test .stacks_client .get_peer_info() @@ -5226,9 +5234,10 @@ fn continue_after_fast_block_no_sortition() { .build_next_block(1); btc_blocks_mined += 1; - // assure we have a sortition + // assure we have a successful sortition that miner A won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); // wait for the new block to be processed wait_for(60, || { @@ -5239,15 +5248,25 @@ fn continue_after_fast_block_no_sortition() { .stacks_tip_height; Ok( blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 - && stacks_height > stacks_height_before, + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, ) }) .unwrap(); - info!( - "Nakamoto blocks mined: {}", - blocks_mined1.load(Ordering::SeqCst) - ); + let blocks = test_observer::get_blocks(); + let tenure_change_tx = &blocks.last().unwrap(); + let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, TenureChangeCause::BlockFound); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), + }; info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); @@ -5281,7 +5300,6 @@ fn 
continue_after_fast_block_no_sortition() { .unwrap(); // Make miner 2 also fail to submit any FURTHER block commits - info!("------------------------- Pause Miner 2's Block Commits -------------------------"); rl2_skip_commit_op.set(true); let burn_height_before = get_burn_height(); @@ -5298,9 +5316,10 @@ fn continue_after_fast_block_no_sortition() { .unwrap(); btc_blocks_mined += 1; - // assure we have a sortition + // assure we have a successful sortition that miner B won let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); info!("----- Waiting for block rejections -----"); let min_rejections = num_signers * 4 / 10; @@ -5326,36 +5345,38 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block rejections"); - // Miner another block and ensure there is _no_ sortition - info!("------------------------- Mine Burn Block with No Sortition -------------------------"); - let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - let burn_height_before = get_burn_height(); - let commits_before_1 = rl1_commits.load(Ordering::SeqCst); - let commits_before_2 = rl2_commits.load(Ordering::SeqCst); + // Mine another couple burn blocks and ensure there is _no_ sortition + info!("------------------------- Mine Two Burn Block(s) with No Sortitions -------------------------"); + for _ in 0..2 { + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let burn_height_before = get_burn_height(); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + let commits_before_2 = rl2_commits.load(Ordering::SeqCst); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - btc_blocks_mined += 1; + signer_test + .running_nodes + .btc_regtest_controller + 
.build_next_block(1); + btc_blocks_mined += 1; - wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); + wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); - assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); - assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); - assert_eq!( - blocks_mined1.load(Ordering::SeqCst), - blocks_processed_before_1 - ); - assert_eq!( - blocks_mined2.load(Ordering::SeqCst), - blocks_processed_before_2 - ); + assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); + assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); + assert_eq!( + blocks_mined1.load(Ordering::SeqCst), + blocks_processed_before_1 + ); + assert_eq!( + blocks_mined2.load(Ordering::SeqCst), + blocks_processed_before_2 + ); - // assure we have NO sortition - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - assert!(!tip.sortition); + // assure we have NO sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(!tip.sortition); + } // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner let stacks_height = signer_test @@ -5366,11 +5387,11 @@ fn continue_after_fast_block_no_sortition() { assert_eq!(stacks_height, stacks_height_before); let stacks_height_before = stacks_height; - let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); - info!("------------------------- Enabling signers to approve proposals -------------------------"; + info!("------------------------- Enabling Signer Block Proposals -------------------------"; "stacks_height" => stacks_height_before, ); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let nmb_old_blocks = test_observer::get_blocks().len(); // Allow signers to respond to proposals again TEST_REJECT_ALL_BLOCK_PROPOSAL @@ -5378,9 +5399,52 @@ fn continue_after_fast_block_no_sortition() { 
.unwrap() .replace(Vec::new()); - info!("------------------------- Mining Interim Block -------------------------"); - // submit a tx so that the miner will mine an extra block just in case due to timing constraints, the first block with the tenure extend was - // rejected already by the signers + info!("------------------------- Wait for Miner B's Block N -------------------------"); + // wait for the new block to be processed + wait_for(30, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .expect("Timed out waiting for block to be mined and processed"); + + info!( + "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" + ); + + let blocks = test_observer::get_blocks(); + assert_eq!(blocks.len(), nmb_old_blocks + 1,); + let tenure_change_block = &blocks.last().unwrap(); + let transactions = tenure_change_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, TenureChangeCause::BlockFound); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), + }; + + info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); + + let nmb_old_blocks = blocks.len(); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // submit a tx 
so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -5391,58 +5455,125 @@ fn continue_after_fast_block_no_sortition() { ); submit_tx(&http_origin, &transfer_tx); - // TODO: combine these wait fors once fixed code // wait for the new block to be processed - wait_for(30, || { - Ok(blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2) - }) - .expect("Timed out waiting for block to be mined and processed"); - wait_for(30, || { let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - Ok(stacks_height > stacks_height_before) + Ok( + blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("Expected a new Stacks block to be mined"); + .expect("Timed out waiting for block to be mined and processed"); - wait_for( - 30, - || Ok(test_observer::get_blocks().len() > nmb_old_blocks), - ) - .expect("Timed out waiting for test observer to see new block"); + info!("------------------------- Verify Miner B's Block N+1 -------------------------"); - info!( - "------------------------- Verify Tenure Extend Tx from Miner B -------------------------" + let blocks = test_observer::get_blocks(); + assert_eq!(blocks.len(), nmb_old_blocks + 1,); + let tenure_extend_block = blocks.last().unwrap(); + let transactions = tenure_extend_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + assert!( + matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), + "Expected Token Transfer Transaction. 
Got {:?}", + parsed.payload ); + info!("------------------------- Mine An Empty Sortition -------------------------"); + let nmb_old_blocks = blocks.len(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && test_observer::get_blocks().len() > nmb_old_blocks) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); let blocks = test_observer::get_blocks(); - let tenure_extend_block = if nmb_old_blocks + 1 == test_observer::get_blocks().len() { - blocks.last().unwrap() - } else { - &blocks[blocks.len() - 2] - }; + assert_eq!(blocks.len(), nmb_old_blocks + 1,); + let tenure_extend_block = &blocks.last().unwrap(); let transactions = tenure_extend_block["transactions"].as_array().unwrap(); let tx = transactions.first().expect("No transactions in block"); let raw_tx = tx["raw_tx"].as_str().unwrap(); let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); match &parsed.payload { - TransactionPayload::TenureChange(payload) - if payload.cause == TenureChangeCause::Extended => {} - _ => panic!("Expected tenure extend transaction, got {parsed:?}"), + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, TenureChangeCause::Extended); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), }; + info!("------------------------- Unpause Miner A's Block Commits -------------------------"); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); + wait_for(30, || { + Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1) + }) + .unwrap(); + + info!("------------------------- Run Miner A's Tenure -------------------------"); + let nmb_old_blocks = blocks.len(); + let burn_height_before = 
get_burn_height(); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + Ok(get_burn_height() > burn_height_before + && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && test_observer::get_blocks().len() > nmb_old_blocks) + }, + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); + let blocks = test_observer::get_blocks(); + let tenure_change_tx = &blocks.last().unwrap(); + let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, TenureChangeCause::BlockFound); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), + }; + + info!( + "------------------------- Confirm Burn and Stacks Block Heights -------------------------" + ); let peer_info = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined); - // We successfully mine at least 2 stacks block in this test - assert!(peer_info.stacks_tip_height >= starting_peer_height + 2); + assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 5); + + info!("------------------------- Shutdown -------------------------"); rl2_coord_channels .lock() .expect("Mutex poisoned") From 
a28a6dd6304ee306aa1db7a02014012d11c4c1c7 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Wed, 6 Nov 2024 18:41:30 -0800 Subject: [PATCH 088/109] Cleanup tests Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 111 ++++++++------------- 1 file changed, 40 insertions(+), 71 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e67d95853a..1e930da482 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5159,6 +5159,37 @@ fn continue_after_fast_block_no_sortition() { let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + // Some helper functions for verifying the blocks contain their expected transactions + let verify_last_block_contains_tenure_change_tx = |cause: TenureChangeCause| { + let blocks = test_observer::get_blocks(); + let tenure_change_tx = &blocks.last().unwrap(); + let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) => { + assert_eq!(payload.cause, cause); + } + _ => panic!("Expected tenure change transaction, got {parsed:?}"), + }; + }; + + let verify_last_block_contains_transfer_tx = || { + let blocks = test_observer::get_blocks(); + let tenure_change_tx = &blocks.last().unwrap(); + let transactions = tenure_change_tx["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = 
StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + assert!( + matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), + "Expected token transfer transaction, got {parsed:?}" + ); + }; + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); // Make sure Miner 2 cannot win a sortition at first. @@ -5254,19 +5285,7 @@ fn continue_after_fast_block_no_sortition() { }) .unwrap(); - let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, TenureChangeCause::BlockFound); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); @@ -5303,6 +5322,7 @@ fn continue_after_fast_block_no_sortition() { rl2_skip_commit_op.set(true); let burn_height_before = get_burn_height(); + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; "burn_height_before" => burn_height_before, "rejections_before" => rejections_before, @@ -5418,25 +5438,11 @@ fn continue_after_fast_block_no_sortition() { info!( "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" ); - - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), nmb_old_blocks + 1,); - let tenure_change_block = &blocks.last().unwrap(); - let transactions = tenure_change_block["transactions"].as_array().unwrap(); - 
let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, TenureChangeCause::BlockFound); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); - let nmb_old_blocks = blocks.len(); + let nmb_old_blocks = test_observer::get_blocks().len(); let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let stacks_height_before = signer_test .stacks_client @@ -5472,22 +5478,10 @@ fn continue_after_fast_block_no_sortition() { info!("------------------------- Verify Miner B's Block N+1 -------------------------"); - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), nmb_old_blocks + 1,); - let tenure_extend_block = blocks.last().unwrap(); - let transactions = tenure_extend_block["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - assert!( - matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), - "Expected Token Transfer Transaction. 
Got {:?}", - parsed.payload - ); + verify_last_block_contains_transfer_tx(); info!("------------------------- Mine An Empty Sortition -------------------------"); - let nmb_old_blocks = blocks.len(); + let nmb_old_blocks = test_observer::get_blocks().len(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -5500,20 +5494,7 @@ fn continue_after_fast_block_no_sortition() { btc_blocks_mined += 1; info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); - let blocks = test_observer::get_blocks(); - assert_eq!(blocks.len(), nmb_old_blocks + 1,); - let tenure_extend_block = &blocks.last().unwrap(); - let transactions = tenure_extend_block["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, TenureChangeCause::Extended); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); info!("------------------------- Unpause Miner A's Block Commits -------------------------"); let commits_before_1 = rl1_commits.load(Ordering::SeqCst); @@ -5527,7 +5508,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap(); info!("------------------------- Run Miner A's Tenure -------------------------"); - let nmb_old_blocks = blocks.len(); + let nmb_old_blocks = test_observer::get_blocks().len(); let burn_height_before = get_burn_height(); let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); next_block_and( @@ -5548,19 +5529,7 @@ fn continue_after_fast_block_no_sortition() { assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); 
info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); - let blocks = test_observer::get_blocks(); - let tenure_change_tx = &blocks.last().unwrap(); - let transactions = tenure_change_tx["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => { - assert_eq!(payload.cause, TenureChangeCause::BlockFound); - } - _ => panic!("Expected tenure change transaction, got {parsed:?}"), - }; + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!( "------------------------- Confirm Burn and Stacks Block Heights -------------------------" From 3fb6d6fd24c89517d706633753113b45fd25154d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 7 Nov 2024 13:04:41 -0500 Subject: [PATCH 089/109] chore: fix p2p handling of unsolicited nakamoto blocks by loading the reward set for nakamoto prepare phases eagerly, and pass the stacks tip height via NetworkResult to the relayer so it can update prometheus --- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/net/mod.rs | 32 +++- stackslib/src/net/p2p.rs | 180 +++++++++++++++++++---- stackslib/src/net/relay.rs | 6 +- stackslib/src/net/tests/mod.rs | 12 ++ stackslib/src/net/tests/relay/epoch2x.rs | 2 + stackslib/src/net/unsolicited.rs | 3 +- 7 files changed, 200 insertions(+), 37 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index d88082ae41..d67de8e987 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -328,7 +328,7 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn 
sqlite(&self) -> &Connection; - /// Get the ancestor block hash given a height + /// Get the ancestor block hash given a coinbase height fn get_ancestor_block_id( &mut self, coinbase_height: u64, diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 9e17c3f428..d0dc2d0a1b 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1515,6 +1515,10 @@ pub struct NetworkResult { pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, + /// The observed stacks coinbase height + pub coinbase_height: u64, + /// The observed stacks tip height (different in Nakamoto from coinbase height) + pub stacks_tip_height: u64, /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs @@ -1529,6 +1533,8 @@ impl NetworkResult { num_download_passes: u64, num_connected_peers: usize, burn_height: u64, + coinbase_height: u64, + stacks_tip_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, ) -> NetworkResult { @@ -1557,6 +1563,8 @@ impl NetworkResult { num_download_passes: num_download_passes, num_connected_peers, burn_height, + coinbase_height, + stacks_tip_height, rc_consensus_hash, stacker_db_configs, } @@ -3416,8 +3424,6 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); - let old_tip = self.network.stacks_tip.clone(); - self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); @@ -3426,6 +3432,28 @@ pub mod test { self.stacks_node = Some(stacks_node); } + pub fn refresh_reward_cycles(&mut self) { + let sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_block_id = self.network.stacks_tip.block_id(); + let tip_height = 
self.network.stacks_tip.height; + + self.network + .refresh_reward_cycles( + &sortdb, + &mut stacks_node.chainstate, + &tip, + &tip_block_id, + tip_height, + ) + .unwrap(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + } + pub fn for_each_convo_p2p(&mut self, mut f: F) -> Vec> where F: FnMut(usize, &mut ConversationP2P) -> Result, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index d77c0df9fa..c847fa68a3 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4295,38 +4295,55 @@ impl PeerNetwork { } } - /// Refresh our view of the last three reward cycles - /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the - /// signing set) for the current, previous, and previous-previous reward cycles. This data is - /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from - /// any of these reward cycles. - #[cfg_attr(test, mutants::skip)] - fn refresh_reward_cycles( - &mut self, + /// Determine if we need to invalidate a given cached reward set. + /// + /// In Epoch 2, this requires checking the first sortition in the start of the reward set's + /// reward phase. + /// + /// In Nakamoto, this requires checking the anchor block in the prepare phase for the upcoming + /// reward phase. 
+ fn check_reload_cached_reward_set( + &self, sortdb: &SortitionDB, - chainstate: &mut StacksChainState, + chainstate: &StacksChainState, + rc: u64, tip_sn: &BlockSnapshot, tip_block_id: &StacksBlockId, - ) -> Result<(), net_error> { - let cur_rc = self - .burnchain - .block_height_to_reward_cycle(tip_sn.block_height) - .expect("FATAL: sortition from before system start"); - - let prev_rc = cur_rc.saturating_sub(1); - let prev_prev_rc = prev_rc.saturating_sub(1); - let ih = sortdb.index_handle(&tip_sn.sortition_id); - - for rc in [cur_rc, prev_rc, prev_prev_rc] { - debug!("Refresh reward cycle info for cycle {}", rc); + tip_height: u64, + ) -> Result { + let epoch = self.get_epoch_at_burn_height(tip_sn.block_height); + if epoch.epoch_id >= StacksEpochId::Epoch30 { + // epoch 3, where there are no forks except from bugs or burnchain reorgs. + // invalidate reward cycles on burnchain or stacks reorg, should they ever happen + let reorg = Self::is_reorg(Some(&self.burnchain_tip), tip_sn, sortdb) + || Self::is_nakamoto_reorg( + &self.stacks_tip.block_id(), + self.stacks_tip.height, + tip_block_id, + tip_height, + chainstate, + ); + if reorg { + info!("Burnchain or Stacks reorg detected; will invalidate cached reward set for cycle {rc}"); + } + return Ok(reorg); + } else { + // epoch 2 // NOTE: + 1 needed because the sortition db indexes anchor blocks at index height 1, // not 0 + let ih = sortdb.index_handle(&tip_sn.sortition_id); let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc) + 1; let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? 
else { - // reward cycle is too far back for there to be an ancestor - continue; + // reward cycle is too far back for there to be an ancestor, so no need to + // reload + test_debug!( + "No ancestor sortition ID off of {} (height {}) at {rc_start_height})", + &tip_sn.sortition_id, + tip_sn.block_height + ); + return Ok(false); }; let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; @@ -4340,12 +4357,53 @@ impl PeerNetwork { || cached_rc_info.anchor_block_hash == *anchor_hash { // cached reward set data is still valid - continue; + test_debug!("Cached reward cycle {rc} is still valid"); + return Ok(false); } } } + } + + Ok(true) + } - debug!("Load reward cycle info for cycle {}", rc); + /// Refresh our view of the last three reward cycles + /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the + /// signing set) for the current, previous, and previous-previous reward cycles. This data is + /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from + /// any of these reward cycles. + #[cfg_attr(test, mutants::skip)] + pub fn refresh_reward_cycles( + &mut self, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip_sn: &BlockSnapshot, + tip_block_id: &StacksBlockId, + tip_height: u64, + ) -> Result<(), net_error> { + let cur_rc = self + .burnchain + .block_height_to_reward_cycle(tip_sn.block_height) + .expect("FATAL: sortition from before system start"); + + let prev_rc = cur_rc.saturating_sub(1); + let prev_prev_rc = prev_rc.saturating_sub(1); + + for rc in [cur_rc, prev_rc, prev_prev_rc] { + debug!("Refresh reward cycle info for cycle {}", rc); + if self.current_reward_sets.contains_key(&rc) + && !self.check_reload_cached_reward_set( + sortdb, + chainstate, + rc, + tip_sn, + tip_block_id, + tip_height, + )? 
+ { + continue; + } + debug!("Refresh reward cycle info for cycle {rc}"); let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, &tip_sn.sortition_id, @@ -4452,6 +4510,7 @@ impl PeerNetwork { chainstate, &canonical_sn, &new_stacks_tip_block_id, + stacks_tip_height, )?; } @@ -4649,7 +4708,7 @@ impl PeerNetwork { debug!( "{:?}: handle unsolicited stacks messages: tenure changed {} != {}, {} buffered", self.get_local_peer(), - &self.burnchain_tip.consensus_hash, + &self.stacks_tip.consensus_hash, &canonical_sn.consensus_hash, self.pending_stacks_messages .iter() @@ -4751,7 +4810,6 @@ impl PeerNetwork { ibd, true, ); - let unhandled_messages = self.handle_unsolicited_stacks_messages(chainstate, unhandled_messages, true); @@ -4998,7 +5056,7 @@ impl PeerNetwork { Ok(()) } - /// Static helper to check to see if there has been a reorg + /// Static helper to check to see if there has been a burnchain reorg pub fn is_reorg( last_sort_tip: Option<&BlockSnapshot>, sort_tip: &BlockSnapshot, @@ -5021,15 +5079,15 @@ impl PeerNetwork { { // current and previous sortition tips are at the same height, but represent different // blocks. - debug!( - "Reorg detected at burn height {}: {} != {}", + info!( + "Burnchain reorg detected at burn height {}: {} != {}", sort_tip.block_height, &last_sort_tip.consensus_hash, &sort_tip.consensus_hash ); return true; } // It will never be the case that the last and current tip have different heights, but the - // smae consensus hash. If they have the same height, then we would have already returned + // same consensus hash. If they have the same height, then we would have already returned // since we've handled both the == and != cases for their consensus hashes. So if we reach // this point, the heights and consensus hashes are not equal. 
We only need to check that // last_sort_tip is an ancestor of sort_tip @@ -5061,6 +5119,60 @@ impl PeerNetwork { false } + /// Static helper to check to see if there has been a Nakamoto reorg. + /// Return true if there's a Nakamoto reorg + /// Return false otherwise. + pub fn is_nakamoto_reorg( + last_stacks_tip: &StacksBlockId, + last_stacks_tip_height: u64, + stacks_tip: &StacksBlockId, + stacks_tip_height: u64, + chainstate: &StacksChainState, + ) -> bool { + if last_stacks_tip == stacks_tip { + // same tip + return false; + } + + if last_stacks_tip_height == stacks_tip_height && last_stacks_tip != stacks_tip { + // last block is a sibling + info!( + "Stacks reorg detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + if stacks_tip_height < last_stacks_tip_height { + info!( + "Stacks reorg (chain shrink) detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + // It will never be the case that the last and current tip have different heights, but the + // same block ID. If they have the same height, then we would have already returned + // since we've handled both the == and != cases for their block IDs. So if we reach + // this point, the heights and block IDs are not equal. We only need to check that + // last_stacks_tip is an ancestor of stacks_tip + + let mut cursor = stacks_tip.clone(); + for _ in last_stacks_tip_height..stacks_tip_height { + let Ok(Some(parent_id)) = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor) + else { + error!("Failed to load parent id of {cursor}"); + return true; + }; + cursor = parent_id; + } + + debug!("is_nakamoto_reorg check"; + "parent_id" => %cursor, + "last_stacks_tip" => %last_stacks_tip); + + cursor != *last_stacks_tip + } + /// Log our neighbors. 
/// Used for testing and debuggin fn log_neighbors(&mut self) { @@ -5143,6 +5255,10 @@ impl PeerNetwork { } }; + test_debug!( + "unsolicited_buffered_messages = {:?}", + &unsolicited_buffered_messages + ); let mut network_result = NetworkResult::new( self.stacks_tip.block_id(), self.num_state_machine_passes, @@ -5150,6 +5266,8 @@ impl PeerNetwork { self.num_downloader_passes, self.peers.len(), self.chain_view.burn_block_height, + self.stacks_tip.coinbase_height, + self.stacks_tip.height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), ); diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index f923aa1281..b5fbf76cf4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -2322,8 +2322,6 @@ impl Relayer { event_observer, )?; - update_stacks_tip_height(chain_height as i64); - Ok(ret) } @@ -3034,6 +3032,10 @@ impl Relayer { event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; + update_stacks_tip_height( + i64::try_from(network_result.stacks_tip_height).unwrap_or(i64::MAX), + ); + let receipts = ProcessedNetReceipts { mempool_txs_added, processed_unconfirmed_state, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index b8e9167ad9..d9c7402bf8 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -1154,6 +1154,8 @@ fn test_network_result_update() { 1, 1, 1, + 1, + 1, ConsensusHash([0x11; 20]), HashMap::new(), ); @@ -1165,6 +1167,8 @@ fn test_network_result_update() { 2, 2, 2, + 2, + 2, ConsensusHash([0x22; 20]), HashMap::new(), ); @@ -1600,6 +1604,8 @@ fn test_network_result_update() { 10, 10, 10, + 10, + 10, ConsensusHash([0xaa; 20]), HashMap::new(), ); @@ -1657,6 +1663,8 @@ fn test_network_result_update() { 10, 10, 10, + 10, + 10, ConsensusHash([0xaa; 20]), HashMap::new(), ); @@ -1714,6 +1722,8 @@ fn test_network_result_update() { 11, 11, 11, + 11, + 11, ConsensusHash([0xbb; 20]), HashMap::new(), ); @@ -1736,6 +1746,8 @@ fn 
test_network_result_update() { 11, 11, 11, + 11, + 11, ConsensusHash([0xbb; 20]), HashMap::new(), ); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 23d1dd60a8..4f18e109a5 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3097,6 +3097,8 @@ fn process_new_blocks_rejects_problematic_asts() { 0, 0, 0, + 0, + 0, ConsensusHash([0x01; 20]), HashMap::new(), ); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index d10a6ee368..231e0a91af 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -1221,13 +1221,14 @@ impl PeerNetwork { ) { // unable to store this due to quota being exceeded + debug!("{:?}: drop message to quota being exceeded: {:?}", self.get_local_peer(), &message.payload.get_message_description()); return false; } if !buffer { debug!( "{:?}: Re-try handling buffered sortition-bound message {} from {:?}", - &self.get_local_peer(), + self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); From 5fdaaf0eb431d84ef8b796f875b060045800cd32 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 7 Nov 2024 13:05:21 -0500 Subject: [PATCH 090/109] fix: we now report the actual block height --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3e9f235424..87826fba6f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1631,10 +1631,8 @@ fn simple_neon_integration() { tip.stacks_block_height ); - let expected_result_2 = format!( - "stacks_node_stacks_tip_height {}", - tip.stacks_block_height - 1 - ); + let expected_result_2 = + format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); 
Ok(res.contains(&expected_result_1) && res.contains(&expected_result_2)) }) .expect("Prometheus metrics did not update"); From f2a8d3e6a877ea5beb5c175bfe7dcad8c95f509b Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Nov 2024 10:06:58 -0800 Subject: [PATCH 091/109] Fix tenure extend to build off the chain tip correclty Signed-off-by: Jacinta Ferrant --- .../stacks-node/src/nakamoto_node/relayer.rs | 54 +++++++++---------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 6867b81a96..535b0ceb81 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -887,7 +887,7 @@ impl RelayerThread { let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); // If we won the last sortition, then we should start a new tenure off of it. - let block_election_snapshot = { + let last_block_election_snapshot = { let ih = self.sortdb.index_handle(&burn_tip.sortition_id); ih.get_last_snapshot_with_sortition(burn_tip.block_height) .map_err(|e| { @@ -896,13 +896,13 @@ impl RelayerThread { })? }; - let won_last_sortition = block_election_snapshot.miner_pk_hash == Some(mining_pkh); + let won_last_sortition = last_block_election_snapshot.miner_pk_hash == Some(mining_pkh); debug!( "Relayer: Current burn block had no sortition. 
Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "block_election_snapshot.consensus_hash" => %block_election_snapshot.consensus_hash, - "block_election_snapshot.miner_pk_hash" => ?block_election_snapshot.miner_pk_hash, + "last_block_election_snapshot.consensus_hash" => %last_block_election_snapshot.consensus_hash, + "last_block_election_snapshot.miner_pk_hash" => ?last_block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, "burn_view_ch" => %new_burn_view, @@ -912,42 +912,40 @@ impl RelayerThread { return Ok(()); } - let tip_info = - NakamotoChainState::get_canonical_block_header(self.chainstate.db(), &self.sortdb) - .map_err(|e| { - error!("Relayer: failed to get canonical block header: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - .ok_or_else(|| { - error!("Relayer: failed to get canonical block header"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - let last_non_empty_sortition_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &tip_info.consensus_hash) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) .map_err(|e| { - error!("Relayer: failed to get last non-empty sortition snapshot: {e:?}"); + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })? .ok_or_else(|| { - error!("Relayer: failed to get last non-empty sortition snapshot"); + error!("Relayer: failed to get block snapshot for canonical tip"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; let won_last_non_empty_sortition_snapshot = last_non_empty_sortition_snapshot.miner_pk_hash == Some(mining_pkh); - let reason = if !won_last_non_empty_sortition_snapshot { - debug!("Relayer: Failed to issue a tenure change payload in our last tenure. 
Issue a tenure change payload again."); - MinerReason::EmptyTenure - } else { - debug!("Relayer: Successfully issued a tenure change payload in our last tenure. Issue a continue extend."); - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - } - }; + let (parent_tenure_start, block_election_snapshot, reason) = + if !won_last_non_empty_sortition_snapshot { + debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a new tenure change payload."); + ( + canonical_stacks_tip, // TODO: what should this be? is this correct? + last_block_election_snapshot, + MinerReason::EmptyTenure, + ) + } else { + debug!("Relayer: Successfully issued a tenure change payload in its tenure. Issue a continue extend from the chain tip."); + ( + canonical_stacks_tip, //For tenure extend, we sould be extending off the canonical tip + last_non_empty_sortition_snapshot, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) + }; match self.start_new_tenure( - canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip + parent_tenure_start, block_election_snapshot, burn_tip, reason, From 8c9d129fd430855a0d95c2ee29c3fd2cf436f2e1 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Nov 2024 10:19:34 -0800 Subject: [PATCH 092/109] Remove TODO comment Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 535b0ceb81..e031ccfd83 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -930,7 +930,7 @@ impl RelayerThread { if !won_last_non_empty_sortition_snapshot { debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issue a new tenure change payload."); ( - canonical_stacks_tip, // TODO: what should this be? 
is this correct? + canonical_stacks_tip, last_block_election_snapshot, MinerReason::EmptyTenure, ) From ecec11cf401581451c5f62980928fbbc5b14fd22 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 7 Nov 2024 13:43:00 -0500 Subject: [PATCH 093/109] docs: update mining documentation with note about RBFs --- docs/mining.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/docs/mining.md b/docs/mining.md index 34a299cd1c..10f49c5620 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -19,14 +19,26 @@ nakamoto_attempt_time_ms = 20000 [burnchain] # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 -# Amount (in sats) per byte - Used to calculate the transaction fees -satoshis_per_byte = 25 -# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +# Amount in sats per byte used to calculate the Bitcoin transaction fee (default: 50) +satoshis_per_byte = 50 +# Amount of sats per byte to add when RBF'ing a Bitcoin tx (default: 5) rbf_fee_increment = 5 -# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +# Maximum percentage of satoshis_per_byte to allow in RBF fee (default: 150) max_rbf = 150 ``` +NOTE: Ensuring that your miner can successfully use RBF (Replace-by-Fee) is +critical for reliable block production. If a miner fails to replace an outdated +block commit with a higher-fee transaction, it risks committing to an incorrect +tenure. This would prevent the miner from producing valid blocks during its +tenure, as it would be building on an invalid chain tip, causing the signers to +reject its blocks. + +To avoid this, configure satoshis_per_byte, rbf_fee_increment, and max_rbf to +allow for at least three fee increments within the max_rbf limit. This helps +ensure that your miner can adjust its fees sufficiently to stay on the canonical +chain. 
+ You can verify that your node is operating as a miner by checking its log output to verify that it was able to find its Bitcoin UTXOs: From 4c59af8cfe30f35a532502d2df9868f417afa0dc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 7 Nov 2024 14:05:02 -0500 Subject: [PATCH 094/109] chore: address PR feedback --- stackslib/src/net/p2p.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index c847fa68a3..a20145a2b6 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4323,9 +4323,6 @@ impl PeerNetwork { tip_height, chainstate, ); - if reorg { - info!("Burnchain or Stacks reorg detected; will invalidate cached reward set for cycle {rc}"); - } return Ok(reorg); } else { // epoch 2 From 93979d1ef4a466c1c33bafa9b62e8eda8c86ec11 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 7 Nov 2024 14:12:34 -0500 Subject: [PATCH 095/109] test: remove possible flakiness from `block_commit_delay` --- testnet/stacks-node/src/tests/signer/v0.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index dee6b294de..27a7566b1e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6154,6 +6154,11 @@ fn block_commit_delay() { signer_test.boot_to_epoch_3(); + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + next_block_and_process_new_stacks_block( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -6161,6 +6166,16 @@ fn block_commit_delay() { ) .expect("Failed to mine first block"); + // Ensure that the block commit has been sent before continuing + wait_for(60, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits > commits_before) + }) + .expect("Timed out waiting for block commit after new Stacks block"); + // Prevent a block from 
being mined by making signers reject it. let all_signers = signer_test .signer_stacks_private_keys From f42bc48a54c95f4501283ce904914d70fa6a965f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Nov 2024 13:23:32 -0800 Subject: [PATCH 096/109] Fix miner forking by being strict about sortition winners Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 357 +++++++++++++-------- 1 file changed, 232 insertions(+), 125 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4250097dbe..fa86d73d05 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1806,8 +1806,8 @@ fn miner_forking() { let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let Counters { - naka_skip_commit_op, - naka_submitted_commits: second_miner_commits_submitted, + naka_skip_commit_op: skip_commit_op_rl2, + naka_submitted_commits: commits_submitted_rl2, .. 
} = run_loop_2.counters(); let _run_loop_2_thread = thread::Builder::new() @@ -1828,149 +1828,256 @@ fn miner_forking() { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let commits_submitted_rl1 = signer_test.running_nodes.commits_submitted.clone(); + let skip_commit_op_rl1 = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - naka_skip_commit_op.set(true); + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; info!("------------------------- Reached Epoch 3.0 -------------------------"); - let mut sortitions_seen = Vec::new(); - let run_sortition = || { - info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); - TEST_BROADCAST_STALL.lock().unwrap().replace(true); + info!("Pausing both miners' block commit submissions"); + skip_commit_op_rl1.set(true); + skip_commit_op_rl2.set(true); - let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + info!("Flushing any pending commits to enable custom winner selection"); + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - signer_test - .running_nodes 
- .btc_regtest_controller - .build_next_block(1); - naka_skip_commit_op.set(false); + info!("------------------------- RL1 Wins Sortition -------------------------"); + info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // wait until a commit is submitted by run_loop_2 - wait_for(60, || { - let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > rl2_commits_before) - }) + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); + + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + info!("Pausing commits from RL1"); + skip_commit_op_rl1.set(true); + + let burn_height_before = get_burn_height(); + info!("Mine RL1 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL1 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_1, + "RL1 did not win the sortition" + ); + + info!( + "------------------------- RL2 Wins Sortition With Outdated View -------------------------" + ); + let rl2_commits_before = commits_submitted_rl2.load(Ordering::SeqCst); + + info!("Unpausing commits from RL2"); + skip_commit_op_rl2.set(false); + + info!("Waiting for commits from RL2"); + wait_for(30, || { + Ok(commits_submitted_rl2.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + 
info!("Pausing commits from RL2"); + skip_commit_op_rl2.set(true); + + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for the block to be broadcasted and processed + wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) + .expect("Timed out waiting for a block to be processed"); + + // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) + }) + .collect(); + + let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) .unwrap(); - // wait until a commit is submitted by run_loop_1 - wait_for(60, || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > rl1_commits_before) + + let blocks_len = test_observer::get_blocks().len(); + let burn_height_before = get_burn_height(); + info!("Mine RL2 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // Ensure that RL2 doesn't produce a valid block + assert!( + wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err(), + "RL2 produced a block" + ); + + // fetch the current sortition info + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by 
RL2 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_2, + "RL2 did not win the sortition" + ); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) }) - .unwrap(); + .collect(); + assert!(!nakamoto_headers.contains_key(&tip.consensus_hash)); - // fetch the current sortition info - let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + info!("------------------------- RL1 RBFs its Own Commit -------------------------"); + info!("Pausing stacks block proposal to test RBF capability"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // block commits from RL2 -- this will block until the start of the next iteration - // in this loop. - naka_skip_commit_op.set(true); - // ensure RL1 performs an RBF after unblock block broadcast - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); - // unblock block mining - let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.lock().unwrap().replace(false); + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); - // wait for a block to be processed (or timeout!) 
- if wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err() { - info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); - return (sort_tip, false); - } + info!("Pausing commits from RL1"); + skip_commit_op_rl1.set(true); - info!("Nakamoto block processed, waiting for commit from RL1"); + let burn_height_before = get_burn_height(); + info!("Mine RL1 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - // wait for a commit from RL1 - wait_for(60, || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > rl1_commits_before) - }) - .unwrap(); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set - thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); - (sort_tip, true) - }; + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); - let mut won_by_miner_2_but_no_tenure = false; - let mut won_by_miner_1_after_tenureless_miner_2 = false; - let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); - // miner 2 is expected to be valid iff: - // (a) its the first nakamoto tenure - // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) - let mut expects_miner_2_to_be_valid = true; - // due to the random nature of mining sortitions, the way this test is structured - // is that keeps track of two scenarios that we want to cover, and once enough sortitions - // have been produced to cover those scenarios, it stops and checks the results at the end. 
- while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { - let nmb_sortitions_seen = sortitions_seen.len(); - assert!(max_sortitions >= nmb_sortitions_seen, "Produced {nmb_sortitions_seen} sortitions, but didn't cover the test scenarios, aborting"); - let (sortition_data, had_tenure) = run_sortition(); - sortitions_seen.push((sortition_data.clone(), had_tenure)); - - let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) - .into_iter() - .map(|header| { - info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); - (header.consensus_hash, header) - }) - .collect(); + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); - if had_tenure { - let header_info = nakamoto_headers - .get(&sortition_data.consensus_hash) - .unwrap(); - let header = header_info - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .clone(); - let mined_by_miner_1 = miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap(); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); - info!("Block check"; - "height" => header.chain_length, - "consensus_hash" => %header.consensus_hash, - "block_hash" => %header.block_hash(), - "stacks_block_id" => %header.block_id(), - "mined_by_miner_1?" => mined_by_miner_1, - "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); - if !mined_by_miner_1 { - assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); - } else if won_by_miner_2_but_no_tenure { - // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't - // mine during because they tried to fork. - won_by_miner_1_after_tenureless_miner_2 = true; - } + // Wait for the block to be broadcasted and processed + wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) + .expect("Timed out waiting for a block to be processed"); - // even if it was mined by miner 2, their next block commit should be invalid! - expects_miner_2_to_be_valid = false; - } else { - info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); - assert!(!nakamoto_headers.contains_key(&sortition_data.consensus_hash)); - assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); - won_by_miner_2_but_no_tenure = true; - expects_miner_2_to_be_valid = true; - } - } + info!("Ensure that RL1 performs an RBF after unblocking block broadcast"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to RBF its old commit op"); + + info!("Mine RL1 Tenure"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL1 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_1, + "RL1 did not win the sortition" + ); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + 
info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) + }) + .collect(); + + let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("------------------------- Verify Peer Data -------------------------"); let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; From e59dfc800b487a59c59bf221ce3249370aaa316f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Nov 2024 14:03:41 -0800 Subject: [PATCH 097/109] Store blocks before and info before data PRIOR to forcing a stx block Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 27a7566b1e..a1267f49ed 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5177,6 +5177,11 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { // Clear the stackerdb chunks test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer( &sender_sk, @@ -5189,11 +5194,6 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let 
info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); wait_for(short_timeout, || { let accepted_signers = test_observer::get_stackerdb_chunks() .into_iter() @@ -5231,6 +5231,11 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { info!("------------------------- Starting Tenure B -------------------------"); // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( @@ -5246,11 +5251,6 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { info!( "------------------------- Attempt to mine Nakamoto Block N+1' in Tenure B -------------------------" ); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); // The miner's proposed block should get rejected by all the signers that PREVIOUSLY accepted the block wait_for(short_timeout, || { let rejected_signers = test_observer::get_stackerdb_chunks() From 640fd6d4f8e5c9f8869fee21b65d030323e6e2ef Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 7 Nov 2024 17:06:00 -0500 Subject: [PATCH 098/109] test: wait for burn block change instead of block commit --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a1267f49ed..e203758f45 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5236,14 +5236,12 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .stacks_client 
.get_peer_info() .expect("Failed to get peer info"); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) + let info = signer_test.stacks_client.get_peer_info().unwrap(); + Ok(info.burn_block_height > info_before.burn_block_height) }, ) .unwrap(); From 196446396d6d8f4b9c46b1dceb5e7da4492ced74 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 7 Nov 2024 17:23:01 -0500 Subject: [PATCH 099/109] chore: simplify logic in test --- testnet/stacks-node/src/tests/signer/v0.rs | 56 ++++++++++------------ 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e203758f45..0f9041bd86 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5230,47 +5230,41 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); info!("------------------------- Starting Tenure B -------------------------"); - // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); + + // Clear the test observer so any old rejections are not counted + test_observer::clear(); + + // Start a new tenure and ensure the we see the expected rejections next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - let info = signer_test.stacks_client.get_peer_info().unwrap(); - Ok(info.burn_block_height > info_before.burn_block_height) + let rejected_signers = test_observer::get_stackerdb_chunks() + 
.into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signature, + signer_signature_hash, + .. + })) => non_ignoring_signers.iter().find(|key| { + key.verify(signer_signature_hash.bits(), &signature).is_ok() + }), + _ => None, + } + }) + .collect::>(); + Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) }, ) - .unwrap(); - - info!( - "------------------------- Attempt to mine Nakamoto Block N+1' in Tenure B -------------------------" - ); - // The miner's proposed block should get rejected by all the signers that PREVIOUSLY accepted the block - wait_for(short_timeout, || { - let rejected_signers = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { - signature, - signer_signature_hash, - .. 
- })) => non_ignoring_signers - .iter() - .find(|key| key.verify(signer_signature_hash.bits(), &signature).is_ok()), - _ => None, - } - }) - .collect::>(); - Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) - }) .expect("FAIL: Timed out waiting for block proposal rejections"); let blocks_after = mined_blocks.load(Ordering::SeqCst); @@ -5279,7 +5273,7 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() { .get_peer_info() .expect("Failed to get peer info"); assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after, info_before); + assert_eq!(info_after.stacks_tip, info_before.stacks_tip); // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); From 04c21badb49fb5a3994869ef2b0058d8d7cbba63 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Thu, 7 Nov 2024 17:56:51 -0500 Subject: [PATCH 100/109] test: remove flakiness in `wait_for_block_acceptance` We can't wait for all signatures, because if a signer sees the global acceptance before sending its signature, it will not send it. 
--- testnet/stacks-node/src/tests/signer/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 622e31bdd6..22f58291ff 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -557,7 +557,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { - // Make sure that ALL signers accepted the block proposal + // Make sure that at least 70% of signers accepted the block proposal wait_for(timeout_secs, || { let signatures = test_observer::get_stackerdb_chunks() .into_iter() @@ -585,7 +585,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); - Ok(signatures.len() == expected_signers.len()) + Ok(signatures.len() > expected_signers.len() * 7 / 10) }) } From 2dced246a7aba485b17818d62fd204435749509e Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Thu, 7 Nov 2024 16:01:43 -0800 Subject: [PATCH 101/109] CRC: when reissuing a tenure change payload use the winning stacks block hash to form the parent_tenure_start Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/nakamoto_node/miner.rs | 15 ++++++--------- testnet/stacks-node/src/nakamoto_node/relayer.rs | 14 +++++++------- 2 files changed, 13 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 95fbb9aebd..f4d0ad0a39 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -922,25 +922,22 @@ impl BlockMinerThread { fn make_vrf_proof(&mut self) -> Option { // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF // key - let sortition_hash = if matches!(self.reason, MinerReason::EmptyTenure) { - self.burn_election_block.sortition_hash - } else { - self.burn_block.sortition_hash - }; let vrf_proof = if 
self.config.get_node_config(false).mock_mining { - self.keychain - .generate_proof(VRF_MOCK_MINER_KEY, sortition_hash.as_bytes()) + self.keychain.generate_proof( + VRF_MOCK_MINER_KEY, + self.burn_election_block.sortition_hash.as_bytes(), + ) } else { self.keychain.generate_proof( self.registered_key.target_block_height, - sortition_hash.as_bytes(), + self.burn_election_block.sortition_hash.as_bytes(), ) }; debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), - &sortition_hash, + &self.burn_election_block.sortition_hash, &self.burn_block.block_height, &self.burn_block.burn_header_hash, &self.registered_key.vrf_public_key.to_hex() diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index e031ccfd83..59f6bb6334 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -912,7 +912,7 @@ impl RelayerThread { return Ok(()); } - let last_non_empty_sortition_snapshot = + let canonical_block_snapshot = SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) .map_err(|e| { error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); @@ -923,22 +923,22 @@ impl RelayerThread { NakamotoNodeError::SnapshotNotFoundForChainTip })?; - let won_last_non_empty_sortition_snapshot = - last_non_empty_sortition_snapshot.miner_pk_hash == Some(mining_pkh); + let won_canonical_block_snapshot = + canonical_block_snapshot.miner_pk_hash == Some(mining_pkh); let (parent_tenure_start, block_election_snapshot, reason) = - if !won_last_non_empty_sortition_snapshot { + if !won_canonical_block_snapshot { debug!("Relayer: Failed to issue a tenure change payload in our last tenure. 
Issue a new tenure change payload."); ( - canonical_stacks_tip, + StacksBlockId(last_block_election_snapshot.winning_stacks_block_hash.0), last_block_election_snapshot, MinerReason::EmptyTenure, ) } else { debug!("Relayer: Successfully issued a tenure change payload in its tenure. Issue a continue extend from the chain tip."); ( - canonical_stacks_tip, //For tenure extend, we sould be extending off the canonical tip - last_non_empty_sortition_snapshot, + canonical_stacks_tip, //For tenure extend, we should be extending off the canonical tip + canonical_block_snapshot, MinerReason::Extended { burn_view_consensus_hash: new_burn_view, }, From 0ae7558d7dfccd7b0eb92aaa6182ae914e9e9583 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Thu, 7 Nov 2024 17:05:10 -0800 Subject: [PATCH 102/109] feat: add blockhash index to metadata_table Clarity VM table --- CHANGELOG.md | 3 ++- clarity/src/vm/database/sqlite.rs | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64a237ba01..7b130e182c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Add index for StacksBlockId to nakamoto block headers table (improves node performance) - Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Add index to `metadata_table` in Clarity DB on `blockhash` ## [3.0.0.0.1] @@ -17,7 +18,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Add index for StacksBlockId to nakamoto block headers table (improves node performance) - Remove the panic for reporting DB deadlocks (just error and continue waiting) - Various test fixes for CI (5353, 5368, 5372, 5371, 5380, 5378, 5387, 5396, 5390, 5394) -- Various log fixes: +- Various log fixes: - don't say proceeding to mine blocks if not a miner - misc. 
warns downgraded to debugs - 5391: Update default block proposal timeout to 10 minutes diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 7d2af59eb5..0e0f0e3f6e 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -248,6 +248,12 @@ impl SqliteConnection { ) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + conn.execute( + "CREATE INDEX IF NOT EXISTS md_blockhashes ON metadata_table(blockhash)", + NO_PARAMS, + ) + .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + Self::check_schema(conn)?; Ok(()) From ebd7904dcf47f71c02905772b94afeb874e629e0 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 8 Nov 2024 09:16:35 -0800 Subject: [PATCH 103/109] Cleanup a wait Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 4250097dbe..93d8447419 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5952,14 +5952,14 @@ fn continue_after_fast_block_no_sortition() { let commits_before_1 = rl1_commits.load(Ordering::SeqCst); let commits_before_2 = rl2_commits.load(Ordering::SeqCst); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); btc_blocks_mined += 1; - wait_for(30, || Ok(get_burn_height() > burn_height_before)).unwrap(); - assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); assert_eq!( From 38bd6c77b28a84bb7415eed5f061a5ecd7e62786 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 8 Nov 2024 09:42:02 -0800 Subject: [PATCH 104/109] Fix 
comments Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index b11dcc4a41..f25e106d34 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -161,7 +161,7 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option, - /// How long to wait in for a response from a block proposal validation response from the node + /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, } @@ -186,14 +186,14 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option, - /// How much time must pass between the first block proposal in a tenure and the next bitcoin block - /// before a subsequent miner isn't allowed to reorg the tenure + /// How much time (in secs) must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option, - /// How much time to wait for a miner to propose a block following a sortition in milliseconds + /// How much time (in millisecs) to wait for a miner to propose a block following a sortition pub block_proposal_timeout_ms: Option, /// An optional custom Chain ID pub chain_id: Option, - /// How long to wait n milliseconds for a response from a block proposal validation response from the node + /// How long to wait (in millisecs) for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, } From b291df6e8f28e2b4dd16dd35ecbcdaf1e8c0cf54 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 8 Nov 2024 
09:55:14 -0800 Subject: [PATCH 105/109] FIx poor merge Signed-off-by: Jacinta Ferrant --- stacks-signer/src/config.rs | 6 ------ testnet/stacks-node/src/tests/signer/v0.rs | 1 + 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 026f652f12..57c90ab0eb 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -291,12 +291,6 @@ impl TryFrom for GlobalConfig { .unwrap_or(DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS), ); - let tenure_last_block_proposal_timeout = Duration::from_secs( - raw_data - .tenure_last_block_proposal_timeout_secs - .unwrap_or(DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS), - ); - let block_proposal_validation_timeout = Duration::from_millis( raw_data .block_proposal_validation_timeout_ms diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d7effc2a34..07062b2de7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6195,6 +6195,7 @@ fn block_validation_response_timeout() { info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), + tenure_last_block_proposal_timeout: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(100), }; let mut block = NakamotoBlock { From e37c8782d0ccab9711af28ffde24a95b8a312177 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 8 Nov 2024 12:39:01 -0800 Subject: [PATCH 106/109] Fix test Signed-off-by: Jacinta Ferrant --- stackslib/src/net/api/postblock_proposal.rs | 29 +++++------- testnet/stacks-node/src/tests/signer/v0.rs | 50 +++++++++++++-------- 2 files changed, 43 insertions(+), 36 deletions(-) diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 517105515c..b67b6166aa 100644 --- 
a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -343,6 +343,17 @@ impl NakamotoBlockProposal { sortdb: &SortitionDB, chainstate: &mut StacksChainState, // not directly used; used as a handle to open other chainstates ) -> Result { + #[cfg(any(test, feature = "testing"))] + { + if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block validation is stalled due to testing directive."); + while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."); + } + } let ts_start = get_epoch_time_ms(); // Measure time from start of function let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); @@ -533,24 +544,6 @@ impl NakamotoBlockProposal { }); } - #[cfg(any(test, feature = "testing"))] - { - if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block validation is stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - } - } - info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 35327d93c4..a58c539925 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -431,7 +431,8 @@ impl SignerTest { /// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. 
/// An invalid block proposal is forcibly written to the miner's slot to simulate the miner proposing a block. /// The signers process the invalid block by first verifying it against the stacks node block proposal endpoint. -/// The signers then broadcast a rejection of the miner's proposed block back to the respective .signers-XXX-YYY contract. +/// The signer that submitted the initial block validation request, should issue a broadcast a rejection of the +/// miner's proposed block back to the respective .signers-XXX-YYY contract. /// /// Test Assertion: /// Each signer successfully rejects the invalid block proposal. @@ -6240,8 +6241,9 @@ fn block_commit_delay() { signer_test.shutdown(); } -// Ensures that a signer will issue ConnectivityIssues rejections if a block submission -// times out. Also ensures that no other proposal gets submitted for validation if we +// Ensures that a signer that successfully submits a block to the node for validation +// will issue ConnectivityIssues rejections if a block submission times out. +// Also ensures that no other proposal gets submitted for validation if we // are already waiting for a block submission response. 
#[test] #[ignore] @@ -6344,11 +6346,8 @@ fn block_validation_response_timeout() { std::thread::sleep(timeout.saturating_sub(elapsed)); info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); - // Verify the signers rejected the first block due to timeout - let mut rejected_signers = vec![]; - let start = Instant::now(); - while rejected_signers.len() < num_signers { - std::thread::sleep(Duration::from_secs(1)); + // Verify that the signer that submits the block to the node will issue a ConnectivityIssues rejection + wait_for(30, || { let chunks = test_observer::get_stackerdb_chunks(); for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) @@ -6359,7 +6358,6 @@ fn block_validation_response_timeout() { reason: _reason, reason_code, signer_signature_hash, - signature, .. })) = message else { @@ -6372,27 +6370,43 @@ fn block_validation_response_timeout() { "Received a rejection for the wrong block" ); if matches!(reason_code, RejectCode::ConnectivityIssues) { - rejected_signers.push(signature); + return Ok(true); } } - assert!( - start.elapsed() <= timeout, - "Timed out after waiting for ConenctivityIssues block rejection" - ); - } + Ok(false) + }) + .expect("Timed out waiting for block proposal rejections"); // Make sure our chain has still not advanced let info_after = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(info_before, info_after); - + let info_before = info_after; info!("Unpausing block validation"); - // Disable the stall and wait for the block to be processed + // Disable the stall and wait for the block to be processed successfully TEST_VALIDATE_STALL.lock().unwrap().replace(false); + wait_for(30, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be processed"); + let 
info_after = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!( + info_after.stacks_tip_height, + info_before.stacks_tip_height + 1, + ); info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + let info_before = info_after; signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); + wait_for(30, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > info_before.stacks_tip_height) + }) + .unwrap(); + + let info_after = get_chain_info(&signer_test.running_nodes.conf); assert_eq!( - get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height, + info_after.stacks_tip_height, info_before.stacks_tip_height + 1, ); } From 60bb664a50668c355591d30206b35afc2edcc560 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 8 Nov 2024 12:40:40 -0800 Subject: [PATCH 107/109] Fix clippy in stacks node Signed-off-by: Jacinta Ferrant --- testnet/stacks-node/src/tests/signer/v0.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index a58c539925..c4f00a39fc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2595,7 +2595,7 @@ fn empty_sortition_before_approval() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2696,15 +2696,14 @@ fn empty_sortition_before_approval() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - 
TransactionPayload::TenureChange(payload) => match payload.cause { + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { info!("Found tenure extend block"); return Ok(true); } TenureChangeCause::BlockFound => {} - }, - _ => {} + } }; } Ok(false) @@ -2769,7 +2768,7 @@ fn empty_sortition_before_proposal() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2854,15 +2853,14 @@ fn empty_sortition_before_proposal() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => match payload.cause { + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { info!("Found tenure extend block"); return Ok(true); } TenureChangeCause::BlockFound => {} - }, - _ => {} + } }; } Ok(false) @@ -6268,7 +6266,7 @@ fn block_validation_response_timeout() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { config.block_proposal_validation_timeout = timeout; }, From f7a02b8a54a820288633a4de487ba23f2d2fe339 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 8 Nov 2024 16:03:13 -0500 Subject: [PATCH 108/109] test: reduce flakiness in `microblocks_disabled` --- testnet/stacks-node/src/tests/epoch_25.rs | 107 +++++++++------------- 1 file changed, 44 insertions(+), 63 deletions(-) diff --git 
a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 4a3e2a4095..34083fb22a 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::wait_for; +use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, @@ -162,6 +162,9 @@ fn microblocks_disabled() { // push us to block 205 next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Ensure we start off with 0 microblocks + assert!(test_observer::get_microblocks().is_empty()); + let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 0, @@ -172,7 +175,11 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - // wait until just before epoch 2.5 + // Wait for a microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 1)) + .expect("Failed to wait for microblocks to be assembled"); + + // mine Bitcoin blocks up until just before epoch 2.5 wait_for(120, || { let tip_info = get_chain_info(&conf); if tip_info.burn_block_height >= epoch_2_5 - 2 { @@ -183,6 +190,14 @@ fn microblocks_disabled() { }) .expect("Failed to wait until just before epoch 2.5"); + // Verify that the microblock was processed + let account = get_account(&http_origin, &spender_1_addr); + assert_eq!( + u64::try_from(account.balance).unwrap(), + spender_1_bal - 1_000 + ); + assert_eq!(account.nonce, 1); + let old_tip_info = get_chain_info(&conf); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -194,13 +209,8 @@ fn microblocks_disabled() { 
.expect("Failed to process block"); info!("Test passed processing 2.5"); - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); + // Submit another microblock only transaction let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 1, @@ -211,19 +221,12 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a microblock to be assembled, but expect none to be assembled + wait_for(30, || Ok(test_observer::get_microblocks().len() > 1)) + .expect_err("Microblocks should not have been assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // second transaction should not have been processed! let account = get_account(&http_origin, &spender_1_addr); @@ -233,31 +236,18 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - let microblocks_assembled = test_observer::get_microblocks().len(); - info!("Microblocks assembled: {microblocks_assembled}",); - assert!( - microblocks_assembled > 0, - "There should be at least 1 microblock assembled" - ); - let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce; // Now, lets tell the miner to try to mine microblocks, but don't try to confirm them! 
+ info!("Setting STACKS_TEST_FORCE_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a second microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 2)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -270,44 +260,35 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - // but we should have assembled and announced at least 1 more block to the observer - assert!(test_observer::get_microblocks().len() > microblocks_assembled); info!( "Microblocks assembled: {}", test_observer::get_microblocks().len() ); // and our miner should have gotten some blocks accepted - assert!( - miner_nonce_after_microblock_assembly > miner_nonce_before_microblock_assembly, + assert_eq!( + miner_nonce_after_microblock_assembly, miner_nonce_before_microblock_assembly + 1, "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}" ); // Now, tell the miner to try to confirm microblocks as well. 
// This should test that the block gets rejected by append block + info!("Setting STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a third microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 3)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; - // and our miner should have gotten at most one more block accepted - // (because they may have had 1 block confirmation in the bitcoin mempool which didn't confirm a microblock - // before we flipped the flag) - assert!( - miner_nonce_after_microblock_confirmation <= miner_nonce_after_microblock_assembly + 1, + // our miner should not have gotten any more blocks accepted + assert_eq!( + miner_nonce_after_microblock_confirmation, + miner_nonce_after_microblock_assembly + 1, "Mined after started microblock confimration: {miner_nonce_after_microblock_confirmation}", ); From c1c54461abf851f072558c1ae7aa9ee373ea458f Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 11 Nov 2024 10:10:10 -0800 Subject: [PATCH 109/109] Replace make_tenure_bitvector debug logs with trace level logs Signed-off-by: Jacinta Ferrant --- stackslib/src/net/inv/nakamoto.rs | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/inv/nakamoto.rs 
b/stackslib/src/net/inv/nakamoto.rs index 87209e4496..d5b08f56d2 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -407,16 +407,13 @@ impl InvGenerator { let cur_sortition_info = self.get_sortition_info(sortdb, &cur_consensus_hash)?; let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash; - debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, parent_sortition_consensus_hash = {}", cur_height, &cur_consensus_hash, &cur_tenure_opt, &parent_sortition_consensus_hash); + trace!("Get sortition and tenure info for height {cur_height}. cur_consensus_hash = {cur_consensus_hash}, cur_tenure_info = {cur_tenure_opt:?}, parent_sortition_consensus_hash = {parent_sortition_consensus_hash}"); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... if cur_tenure_info.tenure_id_consensus_hash == cur_consensus_hash { // ...and this tenure started in this sortition - debug!( - "Tenure was started for {} (height {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was started for {cur_consensus_hash} (height {cur_height})"); tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -426,19 +423,13 @@ impl InvGenerator { )?; } else { // ...but this tenure did not start in this sortition - debug!( - "Tenure was NOT started for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was NOT started for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); } } else { // no active tenure during this sortition. Check the parent sortition to see if a // tenure begain there. 
- debug!( - "No winning sortition for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("No winning sortition for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -457,9 +448,9 @@ impl InvGenerator { } tenure_status.reverse(); - debug!( - "Tenure bits off of {} and {}: {:?}", - nakamoto_tip, &tip.consensus_hash, &tenure_status + trace!( + "Tenure bits off of {nakamoto_tip} and {}: {tenure_status:?}", + &tip.consensus_hash ); Ok(tenure_status) }