diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..52f46fbc49 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -99,6 +99,8 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::empty_sortition + - tests::signer::v0::empty_sortition_before_approval + - tests::signer::v0::empty_sortition_before_proposal - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 @@ -112,6 +114,7 @@ jobs: - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds + - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_fails - tests::signer::v0::miner_recovers_when_broadcast_block_delay_across_tenures_occurs - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork @@ -120,6 +123,9 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::multiple_miners_with_custom_chain_id + - tests::signer::v0::block_commit_delay + - tests::signer::v0::continue_after_fast_block_no_sortition + - tests::signer::v0::block_validation_response_timeout - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -133,6 +139,7 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint + - tests::nakamoto_integrations::signer_chainstate # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/CHANGELOG.md b/CHANGELOG.md index fe5e200d17..0470bab77b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,27 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Add index to `metadata_table` in Clarity DB on `blockhash` +- Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. + +## [3.0.0.0.1] + +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Various test fixes for CI (5353, 5368, 5372, 5371, 5380, 5378, 5387, 5396, 5390, 5394) +- Various log fixes: + - don't say proceeding to mine blocks if not a miner + - misc. 
warns downgraded to debugs +- 5391: Update default block proposal timeout to 10 minutes +- 5406: After block rejection, miner pauses +- Docs fixes + - Fix signer docs link + - Specify burn block in clarity docs + ## [3.0.0.0.0] ### Added diff --git a/Cargo.lock b/Cargo.lock index 227cd9d768..8a3769b6a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,6 +3328,7 @@ dependencies = [ "stackslib", "stx-genesis", "tempfile", + "thiserror", "tikv-jemallocator", "tiny_http", "tokio", @@ -3592,18 +3593,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 10dc427e2e..c00c223c47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } +thiserror = { version = "1.0.65" } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 7d2af59eb5..0e0f0e3f6e 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -248,6 +248,12 @@ impl SqliteConnection { ) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + conn.execute( + "CREATE INDEX IF NOT EXISTS md_blockhashes ON metadata_table(blockhash)", + NO_PARAMS, + ) + .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + Self::check_schema(conn)?; Ok(()) diff --git a/docs/mining.md b/docs/mining.md index 34a299cd1c..10f49c5620 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -19,14 +19,26 @@ nakamoto_attempt_time_ms = 20000 [burnchain] # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 -# Amount (in sats) per byte - Used to calculate the transaction fees -satoshis_per_byte = 25 -# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +# Amount in sats per byte used to calculate the Bitcoin transaction fee (default: 50) +satoshis_per_byte = 50 +# Amount of sats per byte to add when RBF'ing a Bitcoin tx (default: 5) rbf_fee_increment = 5 -# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +# Maximum percentage of satoshis_per_byte to allow in RBF fee (default: 150) max_rbf = 150 ``` +NOTE: Ensuring that your miner can successfully use RBF (Replace-by-Fee) is +critical for reliable block production. If a miner fails to replace an outdated +block commit with a higher-fee transaction, it risks committing to an incorrect +tenure. This would prevent the miner from producing valid blocks during its +tenure, as it would be building on an invalid chain tip, causing the signers to +reject its blocks. + +To avoid this, configure satoshis_per_byte, rbf_fee_increment, and max_rbf to +allow for at least three fee increments within the max_rbf limit. 
This helps +ensure that your miner can adjust its fees sufficiently to stay on the canonical +chain. + You can verify that your node is operating as a miner by checking its log output to verify that it was able to find its Bitcoin UTXOs: diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 63241d3256..7c472365a1 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -30,7 +30,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = "0.12" [dev-dependencies] diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 89fe4677c7..53564af597 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -51,26 +51,25 @@ pub fn update_lock_table(conn: &Connection) { /// Called by `rusqlite` if we are waiting too long on a database lock /// If called too many times, will assume a deadlock and panic pub fn tx_busy_handler(run_count: i32) -> bool { - const TIMEOUT: Duration = Duration::from_secs(300); const AVG_SLEEP_TIME_MS: u64 = 100; + // Every ~5min, report an error with a backtrace + // 5min * 60s/min * 1_000ms/s / 100ms + const ERROR_COUNT: u32 = 3_000; + // First, check if this is taking unreasonably long. If so, it's probably a deadlock let run_count = run_count.unsigned_abs(); - let approx_time_elapsed = - Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); - if approx_time_elapsed > TIMEOUT { - error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + if run_count > 0 && run_count % ERROR_COUNT == 0 { + error!("Deadlock detected. Waited 5 minutes (estimated) for database lock."; "run_count" => run_count, "backtrace" => ?Backtrace::capture() ); for (k, v) in LOCK_TABLE.lock().unwrap().iter() { error!("Database '{k}' last locked by {v}"); } - panic!("Deadlock in thread {:?}", thread::current().name()); } let mut sleep_time_ms = 2u64.saturating_pow(run_count); - sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); if sleep_time_ms > AVG_SLEEP_TIME_MS { diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 9d52f0dbbf..534f3f9969 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -215,14 +215,13 @@ fn make_json_logger() -> Logger { panic!("Tried to construct JSON logger, but stacks-blockchain built without slog_json feature enabled.") } -#[cfg(not(any(test, feature = "testing")))] fn make_logger() -> Logger { if env::var("STACKS_LOG_JSON") == Ok("1".into()) { make_json_logger() } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); - let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); + let decorator = get_decorator(); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -231,17 +230,13 @@ fn make_logger() -> Logger { } #[cfg(any(test, feature = "testing"))] -fn make_logger() -> Logger { - if env::var("STACKS_LOG_JSON") == Ok("1".into()) { - make_json_logger() - } else { - let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); - let plain = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); - let isatty = isatty(Stream::Stdout); - let drain = TermFormat::new(plain, false, 
debug, isatty); - let logger = Logger::root(drain.ignore_res(), o!()); - logger - } +fn get_decorator() -> slog_term::PlainSyncDecorator<slog_term::TestStdoutWriter> { + slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter) +} + +#[cfg(not(any(test, feature = "testing")))] +fn get_decorator() -> slog_term::PlainSyncDecorator<std::io::Stderr> { + slog_term::PlainSyncDecorator::new(std::io::stderr()) } fn inner_get_loglevel() -> slog::Level { diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 489fd39cf7..3183c0d5c3 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,7 +11,21 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -## [3.0.0.0.0] +## [3.0.0.0.1.0] + +### Changed + +- Change block rejection message to generic block response + +## [3.0.0.0.0.1] + +### Added + +### Changed +- Update block proposal timeout default to 10 minutes (#5391) +- Updated documentation link in output (#5363) + +## [3.0.0.0.0.0] ### Added diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index da94cc10de..139c34fba8 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -38,7 +38,7 @@ slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 44ae11b252..fa24c8b22e 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -19,15 +19,15 @@ use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::BurnchainHeaderHash; use slog::{slog_info, slog_warn}; -use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash, StacksPublicKey}; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; -use crate::signerdb::{BlockState, SignerDb}; +use crate::signerdb::{BlockInfo, BlockState, SignerDb}; #[derive(thiserror::Error, Debug)] /// Error type for the signer chainstate module @@ -119,6 +119,9 @@ pub struct ProposalEvalConfig { pub first_proposal_burn_block_timing: Duration, /// Time between processing a sortition and proposing a block before the block is considered invalid pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected before considering + /// a new miner's block at the same height as valid. 
+ pub tenure_last_block_proposal_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -126,6 +129,7 @@ Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, + tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, } } } @@ -460,7 +464,36 @@ impl SortitionsView { Ok(true) } - /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + /// Get the last block from the given tenure + /// Returns the last locally accepted block if it is not timed out, otherwise it will return the last globally accepted block. + fn get_tenure_last_block_info( + consensus_hash: &ConsensusHash, + signer_db: &SignerDb, + tenure_last_block_proposal_timeout: Duration, + ) -> Result<Option<BlockInfo>, ClientError> { + // Get the last known block in the previous tenure + let last_locally_accepted_block = signer_db + .get_last_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + + if let Some(local_info) = last_locally_accepted_block { + if let Some(signed_over_time) = local_info.signed_self { + if signed_over_time + tenure_last_block_proposal_timeout.as_secs() + > get_epoch_time_secs() + { + // The last locally accepted block is not timed out, return it + return Ok(Some(local_info)); + } + } + } + // The last locally accepted block is timed out, get the last globally accepted block + signer_db + .get_last_globally_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string())) + } + + /// Check if the tenure change block confirms the expected parent block + /// (i.e., the last locally accepted block in the parent tenure, or if that block is timed out, the last globally accepted block in the parent tenure) /// It checks the local DB first, and if the block is not present in the local DB, it asks the /// Stacks node for the highest processed block header in the given tenure (and then caches it /// in the DB). @@ -473,24 +506,27 @@ reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, + tenure_last_block_proposal_timeout: Duration, ) -> Result<bool, ClientError> { - // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last globally accepted block in the parent tenure. - let last_globally_accepted_block = signer_db - .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last accepted block in the parent tenure. + let last_block_info = Self::get_tenure_last_block_info( + &tenure_change.prev_tenure_consensus_hash, + signer_db, + tenure_last_block_proposal_timeout, + )?; - if let Some(global_info) = last_globally_accepted_block { + if let Some(info) = last_block_info { // N.B. this block might not be the last globally accepted block across the network; // it's just the highest one in this tenure that we know about. If this given block is // no higher than it, then it's definitely no higher than the last globally accepted // block across the network, so we can do an early rejection here. 
- if block.header.chain_length <= global_info.block.header.chain_length { + if block.header.chain_length <= info.block.header.chain_length { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, - "expected_at_least" => global_info.block.header.chain_length + 1, + "expected_at_least" => info.block.header.chain_length + 1, ); return Ok(false); } @@ -558,6 +594,7 @@ impl SortitionsView { reward_cycle, signer_db, client, + self.config.tenure_last_block_proposal_timeout, )?; if !confirms_expected_parent { return Ok(false); @@ -573,15 +610,15 @@ impl SortitionsView { if !is_valid_parent_tenure { return Ok(false); } - let last_in_tenure = signer_db + let last_in_current_tenure = signer_db .get_last_globally_accepted_block(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if let Some(last_in_tenure) = last_in_tenure { + if let Some(last_in_current_tenure) = last_in_current_tenure { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_current_tenure.block.header.signer_signature_hash(), ); return Ok(false); } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 9885182d98..37706368dc 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -411,6 +411,8 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, + block_proposal_validation_timeout: config.block_proposal_validation_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 6fc7c7b2dd..57c90ab0eb 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -35,7 +35,9 @@ use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; +const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; +const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -128,6 +130,11 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. 
+ pub tenure_last_block_proposal_timeout: Duration, + /// How much time to wait for a block proposal validation response before marking the block invalid + pub block_proposal_validation_timeout: Duration, } /// The parsed configuration for the signer @@ -158,6 +165,12 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option<u32>, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. + pub tenure_last_block_proposal_timeout: Duration, + /// How long to wait for a block proposal validation response from the node + /// before marking that block as invalid and rejecting it + pub block_proposal_validation_timeout: Duration, } /// Internal struct for loading up the config file @@ -180,13 +193,19 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option<String>, - /// How much time must pass between the first block proposal in a tenure and the next bitcoin block - /// before a subsequent miner isn't allowed to reorg the tenure + /// How much time (in secs) must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option<u64>, - /// How much time to wait for a miner to propose a block following a sortition in milliseconds + /// How much time (in millisecs) to wait for a miner to propose a block following a sortition pub block_proposal_timeout_ms: Option<u64>, /// An optional custom Chain ID pub chain_id: Option<u32>, + /// Time in seconds to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. 
+ pub tenure_last_block_proposal_timeout_secs: Option<u64>, + /// How long to wait (in millisecs) for a block proposal validation response from the node + /// before marking that block as invalid and rejecting it + pub block_proposal_validation_timeout_ms: Option<u64>, } impl RawConfigFile { @@ -266,6 +285,18 @@ impl TryFrom<RawConfigFile> for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), ); + let tenure_last_block_proposal_timeout = Duration::from_secs( + raw_data + .tenure_last_block_proposal_timeout_secs + .unwrap_or(DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS), + ); + + let block_proposal_validation_timeout = Duration::from_millis( + raw_data + .block_proposal_validation_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -279,6 +310,8 @@ first_proposal_burn_block_timing, block_proposal_timeout, chain_id: raw_data.chain_id, + tenure_last_block_proposal_timeout, + block_proposal_validation_timeout, }) } } @@ -335,7 +368,7 @@ Metrics endpoint: {metrics_endpoint} /// Get the chain ID for the network pub fn to_chain_id(&self) -> u32 { - self.chain_id.unwrap_or_else(|| match self.network { + self.chain_id.unwrap_or(match self.network { Network::Mainnet => CHAIN_ID_MAINNET, Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, }) diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a23918f6f8..eac60cc53f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -157,11 +157,7 @@ fn handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!( - "Signer version: {}\nConfig: \n{}", - VERSION_STRING.to_string(), - config - ); + println!("Signer version: {}\nConfig: \n{}", *VERSION_STRING, config); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 32689c0f1d..400541d0e7 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -97,8 +97,7 @@ pub fn update_signer_nonce(nonce: u64) { #[allow(dead_code)] /// Remove the origin from the full path to avoid duplicate metrics for different origins fn remove_origin_from_path(full_path: &str, origin: &str) -> String { - let path = full_path.replace(origin, ""); - path + full_path.replace(origin, "") } /// Start a new RPC call timer. 
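For operators, the two new signer timeouts wired through `GlobalConfig` above are read from the signer's TOML config file. A minimal sketch of the corresponding entries (both keys are optional; the values shown are simply the defaults from `config.rs`, i.e. `DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS` and `BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS`):

```toml
# Optional: how long (secs) to honor the last locally accepted block of a
# tenure before falling back to the last globally accepted block when
# judging a new miner's block at the same height (default: 30).
tenure_last_block_proposal_timeout_secs = 30

# Optional: how long (ms) to wait for the node's block proposal validation
# response before rejecting the submitted block (default: 120000).
block_proposal_validation_timeout_ms = 120000
```
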
diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index a0e2b739e9..c8f6041478 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -283,6 +283,8 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo mainnet: self.config.network.is_mainnet(), db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, + tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, + block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, })) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 06b9d703c3..1d2e650207 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -24,7 +24,6 @@ use blockstack_lib::util_lib::db::{ Error as DBError, }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; -use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, @@ -33,6 +32,7 @@ use serde::{Deserialize, Serialize}; use slog::{slog_debug, slog_error}; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 886480f063..bec9f1258d 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -89,6 +89,7 @@ fn setup_test_environment( config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2d6bfa003d..081d8b6a6b 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,6 +15,7 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ @@ -85,6 +86,11 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, + /// How long to wait for a block proposal validation response to arrive before + /// marking a submitted block as invalid + pub block_proposal_validation_timeout: Duration, + /// The current submitted block proposal and its submission time + pub submitted_block_proposal: Option<(BlockProposal, Instant)>, } impl std::fmt::Display for Signer { @@ -127,6 +133,7 @@ impl SignerTrait for Signer { if event_parity == Some(other_signer_parity) { return; } + self.check_submitted_block_proposal(); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. 
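The new `submitted_block_proposal: Option<(BlockProposal, Instant)>` field above drives the `check_submitted_block_proposal` call at the top of event processing. The core of that check is a take-or-put-back expiry pattern; here is a minimal standalone sketch of it (the `Proposal` type and `ValidationTracker` struct are placeholders for illustration only; the real method operates on `BlockProposal`, consults SignerDb, and broadcasts a rejection before dropping the entry):

```rust
use std::time::{Duration, Instant};

struct Proposal; // placeholder for the real BlockProposal

struct ValidationTracker {
    // The in-flight validation request, if any, and when it was submitted.
    submitted: Option<(Proposal, Instant)>,
    timeout: Duration,
}

impl ValidationTracker {
    /// Called on every event-loop pass: returns the proposal if its
    /// validation request has timed out, otherwise keeps tracking it.
    fn take_if_expired(&mut self) -> Option<Proposal> {
        let (proposal, submitted_at) = self.submitted.take()?;
        if submitted_at.elapsed() < self.timeout {
            // Not expired yet. Put it back!
            self.submitted = Some((proposal, submitted_at));
            return None;
        }
        // Timed out: the caller rejects the block so signers don't stall.
        Some(proposal)
    }
}
```
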
@@ -191,7 +198,7 @@ impl SignerTrait for Signer { "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); - stacks_client.post_block_until_ok(self, &b); + stacks_client.post_block_until_ok(self, b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { @@ -274,6 +281,8 @@ impl From for Signer { reward_cycle: signer_config.reward_cycle, signer_db, proposal_config, + submitted_block_proposal: None, + block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, } } } @@ -348,14 +357,14 @@ impl Signer { crate::monitoring::increment_block_responses_sent(accepted); } Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + warn!("{self}: Failed to send block response to stacker-db: {e:?}",); } } return; } info!( - "{self}: received a block proposal for a new block. Submit block for validation. "; + "{self}: received a block proposal for a new block."; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, @@ -434,30 +443,8 @@ impl Signer { }; #[cfg(any(test, feature = "testing"))] - let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { - Some(public_keys) => { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - )) - } else { - None - } - } - None => block_response, - }; + let block_response = + self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation @@ -478,14 +465,35 @@ impl Signer { Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), } } else { - // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. - // Do not store invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); + // Just in case check if the last block validation submission timed out. + self.check_submitted_block_proposal(); + if self.submitted_block_proposal.is_none() { + // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. 
+ info!( + "{self}: submitting block proposal for validation"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + ); + match stacks_client.submit_block_for_validation(block_info.block.clone()) { + Ok(_) => { + self.submitted_block_proposal = + Some((block_proposal.clone(), Instant::now())); + } + Err(e) => { + warn!("{self}: Failed to submit block for validation: {e:?}"); + } + }; + } else { + // Still store the block but log we can't submit it for validation. We may receive enough signatures/rejections + // from other signers to push the proposed block into a global rejection/acceptance regardless of our participation. + // However, we will not be able to participate beyond this until our block submission times out or we receive a response + // from our node. + warn!("{self}: cannot submit block proposal for validation as we are already waiting for a response for a prior submission") + } + // Do not store KNOWN invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. self.signer_db .insert_block(&block_info) .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); @@ -515,6 +523,16 @@ impl Signer { ) -> Option { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self .signer_db @@ -526,9 +544,8 @@ impl Signer { { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? @@ -565,11 +582,29 @@ impl Signer { ) -> Option { crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } let mut block_info = match self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } + block_info + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); @@ -632,6 +667,81 @@ impl Signer { } } + /// Check the current tracked submitted block proposal to see if it has timed out. + /// Broadcasts a rejection and marks the block locally rejected if it has. + fn check_submitted_block_proposal(&mut self) { + let Some((block_proposal, block_submission)) = self.submitted_block_proposal.take() else { + // Nothing to check. 
+ return; + }; + if block_submission.elapsed() < self.block_proposal_validation_timeout { + // Not expired yet. Put it back! + self.submitted_block_proposal = Some((block_proposal, block_submission)); + return; + } + let signature_sighash = block_proposal.block.header.signer_signature_hash(); + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signature_sighash) + { + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + // The block has already reached consensus. + return; + } + block_info + } + Ok(None) => { + // This is weird. If this is reached, its probably an error in code logic or the db was flushed. + // Why are we tracking a block submission for a block we have never seen / stored before. + error!("{self}: tracking an unknown block validation submission."; + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + return; + } + }; + // We cannot determine the validity of the block, but we have not reached consensus on it yet. + // Reject it so we aren't holding up the network because of our inaction. + warn!( + "{self}: Failed to receive block validation response within {} ms. Rejecting block.", self.block_proposal_validation_timeout.as_millis(); + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + let rejection = BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ConnectivityIssues, + &self.private_key, + self.mainnet, + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}"); + let res = self + .stackerdb + .send_message_with_retry::(rejection.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + } + /// Compute the signing weight, given a list of signatures fn compute_signature_signing_weight<'a>( &self, @@ -738,6 +848,15 @@ impl Signer { error!("{self}: Failed to update block state: {e:?}",); panic!("{self} Failed to update block state: {e}"); } + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + // Consensus reached! No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. + self.submitted_block_proposal = None; + } } /// Handle an observed signature from another signer @@ -880,6 +999,15 @@ impl Signer { } } self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + // Consensus reached! 
No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. + self.submitted_block_proposal = None; + } } fn broadcast_signed_block( @@ -941,6 +1069,44 @@ impl Signer { false } + #[cfg(any(test, feature = "testing"))] + fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { + return block_response; + }; + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is testing case only. + self.signer_db + .insert_block(block_info) + .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index a5ecaa0458..c1d07994d7 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -702,6 +702,10 @@ impl Burnchain { } pub fn get_burnchaindb_path(&self) -> String { + if self.working_dir.as_str() == ":memory:" { + return ":memory:".to_string(); + } + let chainstate_dir = Burnchain::get_chainstate_path_str(&self.working_dir); let mut db_pathbuf = PathBuf::from(&chainstate_dir); db_pathbuf.push("burnchain.sqlite"); @@ -743,12 +747,14 @@ impl Burnchain { /// Open just the burnchain database pub fn open_burnchain_db(&self, readwrite: bool) -> Result { let burnchain_db_path = self.get_burnchaindb_path(); - if let Err(e) = fs::metadata(&burnchain_db_path) { - warn!( - "Failed to stat burnchain DB path '{}': {:?}", - &burnchain_db_path, &e - ); - return Err(burnchain_error::DBError(db_error::NoDBError)); + if burnchain_db_path != ":memory:" { + if let Err(e) = fs::metadata(&burnchain_db_path) { + warn!( + "Failed to stat burnchain DB path '{}': {:?}", + &burnchain_db_path, &e + ); + return Err(burnchain_error::DBError(db_error::NoDBError)); + } } test_debug!( "Open burnchain DB at {} (rw? 
{})", diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 72ca2e8bf1..d5f1e18804 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1000,33 +1000,38 @@ impl BurnchainDB { readwrite: bool, ) -> Result { let mut create_flag = false; - let open_flags = match fs::metadata(path) { - Err(e) => { - if e.kind() == io::ErrorKind::NotFound { - // need to create - if readwrite { - create_flag = true; - let ppath = Path::new(path); - let pparent_path = ppath - .parent() - .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); - fs::create_dir_all(&pparent_path) - .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; - - OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + let open_flags = if path == ":memory:" { + create_flag = true; + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + match fs::metadata(path) { + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + // need to create + if readwrite { + create_flag = true; + let ppath = Path::new(path); + let pparent_path = ppath + .parent() + .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); + fs::create_dir_all(&pparent_path) + .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; + + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + return Err(BurnchainError::from(DBError::NoDBError)); + } } else { - return Err(BurnchainError::from(DBError::NoDBError)); + return Err(BurnchainError::from(DBError::IOError(e))); } - } else { - return Err(BurnchainError::from(DBError::IOError(e))); } - } - Ok(_md) => { - // can just open - if readwrite { - OpenFlags::SQLITE_OPEN_READ_WRITE - } else { - OpenFlags::SQLITE_OPEN_READ_ONLY + Ok(_md) => { + // can just open + if readwrite { + OpenFlags::SQLITE_OPEN_READ_WRITE + } else { + OpenFlags::SQLITE_OPEN_READ_ONLY + } } } }; @@ -1089,7 +1094,7 @@ impl BurnchainDB { let conn = sqlite_open(path, open_flags, true)?; let mut db = BurnchainDB { conn }; - if readwrite { + if readwrite || path == ":memory:" { db.add_indexes()?; } Ok(db) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index b8d0441591..d67de8e987 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -289,6 +289,14 @@ lazy_static! 
{ ); "#, ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 2] = [ + r#" + UPDATE db_config SET version = "8"; + "#, + // Add an index for index block hash in nakamoto block headers + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", + ]; } #[cfg(test)] @@ -320,7 +328,7 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn sqlite(&self) -> &Connection; - /// Get the ancestor block hash given a height + /// Get the ancestor block hash given a coinbase height fn get_ancestor_block_id( &mut self, coinbase_height: u64, diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 160e2dc60e..6b6f523f88 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, NAKAMOTO_CHAINSTATE_SCHEMA_5, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -299,14 +299,14 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 8, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 8, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, } } } @@ -680,7 +680,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "7"; +pub const CHAINSTATE_VERSION: &'static str = "8"; const CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1087,28 +1087,24 @@ impl StacksChainState { while db_config.version != CHAINSTATE_VERSION { match db_config.version.as_str() { "1" => { - // migrate to 2 info!("Migrating chainstate schema from version 1 to 2"); for cmd in CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } } "2" => { - // migrate to 3 info!("Migrating chainstate schema from version 2 to 3"); for cmd in CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "3" => { - // migrate to nakamoto 1 info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { tx.execute_batch(cmd)?; } } "4" => { - // migrate to nakamoto 2 info!( "Migrating chainstate schema from version 4 to 5: fix nakamoto 
tenure typo" ); @@ -1117,14 +1113,12 @@ impl StacksChainState { } } "5" => { - // migrate to nakamoto 3 info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "6" => { - // migrate to nakamoto 3 info!( "Migrating chainstate schema from version 6 to 7: adds signer_stats table" ); @@ -1132,6 +1126,14 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "7" => { + info!( + "Migrating chainstate schema from version 7 to 8: add index for nakamoto block headers" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 35ba532667..e9de9139a2 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -578,6 +578,7 @@ impl StacksChainState { post_condition_mode: &TransactionPostConditionMode, origin_account: &StacksAccount, asset_map: &AssetMap, + txid: Txid, ) -> Result { let mut checked_fungible_assets: HashMap> = HashMap::new(); @@ -606,7 +607,7 @@ impl StacksChainState { if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!( "Post-condition check failure on STX owned by {}: {:?} {:?} {}", - account_principal, amount_sent_condition, condition_code, amount_sent + account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid ); return Ok(false); } @@ -650,7 +651,7 @@ impl StacksChainState { .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { - info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent); + info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid); return Ok(false); } @@ -684,7 +685,7 @@ impl StacksChainState { .get_nonfungible_tokens(&account_principal, &asset_id) .unwrap_or(&empty_assets); if !condition_code.check(asset_value, assets_sent) { - info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code); + info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code; "txid" => %txid); return Ok(false); } @@ -726,18 +727,18 @@ impl StacksChainState { // each value must be covered for v in values { if !nfts.contains(&v.clone().try_into()?) 
{ - info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal); + info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal; "txid" => %txid); return Ok(false); } } } else { // no values covered - info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { // no NFT for this principal - info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -747,11 +748,11 @@ impl StacksChainState { checked_fungible_assets.get(&principal) { if !checked_ft_asset_ids.contains(&asset_identifier) { - info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { - info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -980,14 +981,14 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); - info!("{}", &msg); + info!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } if *addr == origin_account.principal { let msg = format!("Invalid TokenTransfer: address tried to send to itself"); - info!("{}", &msg); + info!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1039,6 +1040,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -1274,6 +1276,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -6873,6 +6876,7 @@ pub mod test { mode, origin, &ft_transfer_2, + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -7226,6 +7230,7 @@ pub mod test { mode, origin, &nft_transfer_2, + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -8043,6 +8048,7 @@ pub mod test { post_condition_mode, origin_account, asset_map, + Txid([0; 32]), ) .unwrap(); if result != expected_result { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 7fb08335a2..c3b60c5da8 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -585,7 +585,7 @@ impl TransactionResult { // recover original ClarityError ClarityRuntimeTxError::Acceptable { error, .. 
} => { if let clarity_error::Parse(ref parse_err) = error { - info!("Parse error: {}", parse_err); + info!("Parse error: {}", parse_err; "txid" => %tx.txid()); match &parse_err.err { ParseErrors::ExpressionStackDepthTooDeep | ParseErrors::VaryExpressionStackDepthTooDeep => { diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 8af9cf6ec7..fd370a8b12 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1101,13 +1101,12 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { - use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; - use stacks_common::util::log; use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use stacks_common::util::{get_epoch_time_secs, log}; use super::*; use crate::chainstate::burn::BlockSnapshot; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 8b66c019f0..9a6a84507e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -81,7 +81,7 @@ pub fn path_join(dir: &str, path: &str) -> String { // copy src to dest pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { - eprintln!("Copy directory {} to {}", src_dir, dest_dir); + eprintln!("Copy directory {src_dir} to {dest_dir}"); let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); @@ -91,7 +91,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); - eprintln!("mkdir {}", &next_dest_dir); + eprintln!("mkdir {next_dest_dir}"); fs::create_dir_all(&next_dest_dir)?; for dirent_res in fs::read_dir(&next_src_dir)? 
{ @@ -100,11 +100,11 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let md = fs::metadata(&path)?; if md.is_dir() { let frontier = path_join(&next_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("push {}", &frontier); + eprintln!("push {frontier}"); dir_queue.push_back(frontier); } else { let dest_path = path_join(&next_dest_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("copy {} to {}", &path.to_str().unwrap(), &dest_path); + eprintln!("copy {} to {dest_path}", &path.to_str().unwrap()); fs::copy(path, dest_path)?; } } @@ -475,14 +475,14 @@ impl TestStacksNode { }; if StacksChainState::has_stored_block( - &self.chainstate.db(), + self.chainstate.db(), &self.chainstate.blocks_path, &consensus_hash, &bc.block_header_hash, ) .unwrap() && !StacksChainState::is_block_orphaned( - &self.chainstate.db(), + self.chainstate.db(), &consensus_hash, &bc.block_header_hash, ) @@ -583,11 +583,10 @@ impl TestStacksNode { ); test_debug!( - "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {:?})", + "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {parent_block_snapshot_opt:?})", miner.id, block_commit_op.parent_block_ptr, - block_commit_op.parent_vtxindex, - &parent_block_snapshot_opt + block_commit_op.parent_vtxindex ); self.commit_ops.insert( block_commit_op.block_header_hash.clone(), @@ -767,16 +766,15 @@ pub fn preprocess_stacks_block_data( { Some(sn) => sn, None => { - test_debug!("Block commit did not win sorition: {:?}", block_commit_op); + test_debug!("Block commit did not win sorition: {block_commit_op:?}"); return None; } }; // "discover" this stacks block test_debug!( - "\n\nPreprocess Stacks block {}/{} ({})", + "\n\nPreprocess Stacks block {}/{block_hash} ({})", &commit_snapshot.consensus_hash, - &block_hash, StacksBlockHeader::make_index_block_hash(&commit_snapshot.consensus_hash, &block_hash) ); let block_res = node @@ -793,8 +791,7 @@ pub fn preprocess_stacks_block_data( // "discover" this stacks microblock stream for mblock in stacks_microblocks.iter() { test_debug!( - "Preprocess Stacks microblock {}-{} (seq {})", - &block_hash, + "Preprocess Stacks microblock {block_hash}-{} (seq {})", mblock.block_hash(), mblock.header.sequence ); @@ -828,11 +825,9 @@ pub fn check_block_state_index_root( .read_block_root_hash(&index_block_hash) .unwrap(); test_debug!( - "checking {}/{} state root: expecting {}, got {}", - consensus_hash, + "checking {consensus_hash}/{} state root: expecting {}, got {state_root}", &stacks_header.block_hash(), - &stacks_header.state_index_root, - &state_root + &stacks_header.state_index_root ); state_root == stacks_header.state_index_root } @@ -888,9 +883,8 @@ pub fn check_mining_reward( let mut total: u128 = 10_000_000_000 - spent_total; test_debug!( - "Miner {} has spent {} in total so far", - &miner.origin_address().unwrap(), - spent_total + "Miner {} has spent {spent_total} in total so far", + &miner.origin_address().unwrap() ); if block_height >= MINER_REWARD_MATURITY { @@ -908,13 +902,10 @@ pub fn check_mining_reward( let reward = recipient.coinbase + anchored + (3 * streamed / 5); test_debug!( - "Miner {} received a reward {} = {} + {} + {} at block {}", + "Miner {} received a reward {reward} = {} + {anchored} + {} at block {i}", &recipient.address.to_string(), - reward, recipient.coinbase, - anchored, (3 * streamed / 5), - i ); total += reward; found = true; @@ -922,9 +913,8 @@ pub fn check_mining_reward( } if !found { test_debug!( - "Miner {} received no reward at block 
{}", - miner.origin_address().unwrap(), - i + "Miner {} received no reward at block {i}", + miner.origin_address().unwrap() ); } } @@ -945,11 +935,9 @@ pub fn check_mining_reward( &parent_reward.block_hash, ); test_debug!( - "Miner {} received a produced-stream reward {} from {} confirmed at {}", + "Miner {} received a produced-stream reward {parent_streamed} from {} confirmed at {confirmed_block_height}", miner.origin_address().unwrap().to_string(), - parent_streamed, - heights.get(&parent_ibh).unwrap(), - confirmed_block_height + heights.get(&parent_ibh).unwrap() ); total += parent_streamed; } @@ -967,7 +955,7 @@ pub fn check_mining_reward( return total == 0; } else { if amount != total { - test_debug!("Amount {} != {}", amount, total); + test_debug!("Amount {amount} != {total}"); return false; } return true; @@ -1091,16 +1079,14 @@ pub fn make_smart_contract_with_version( (begin (var-set bar (/ x y)) (ok (var-get bar))))"; test_debug!( - "Make smart contract block at hello-world-{}-{}", - burnchain_height, - stacks_block_height + "Make smart contract block at hello-world-{burnchain_height}-{stacks_block_height}" ); let mut tx_contract = StacksTransaction::new( TransactionVersion::Testnet, miner.as_transaction_auth().unwrap(), TransactionPayload::new_smart_contract( - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), &contract.to_string(), version, ) @@ -1140,7 +1126,7 @@ pub fn make_contract_call( miner.as_transaction_auth().unwrap(), TransactionPayload::new_contract_call( addr.clone(), - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), "set-bar", vec![Value::Int(arg1), Value::Int(arg2)], ) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 9ff6e55644..587daee787 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -528,8 +528,6 @@ fn replay_block( fn replay_block_nakamoto( sort_db: &mut SortitionDB, stacks_chain_state: &mut StacksChainState, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, block: &NakamotoBlock, block_size: u64, ) -> Result<(), ChainstateError> { @@ -785,7 +783,7 @@ fn replay_block_nakamoto( return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); + let (receipt, _clarity_commit, _reward_set_data) = ok_opt.expect("FATAL: unreachable"); assert_eq!( receipt.header.anchored_header.block_hash(), diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs index 2ea73baf04..b7fe94baf1 100644 --- a/stackslib/src/net/api/getattachmentsinv.rs +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -96,11 +96,10 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler { if key == "index_block_hash" { index_block_hash = StacksBlockId::from_hex(&value).ok(); } else if key == "pages_indexes" { - if let Ok(pages_indexes_value) = value.parse::() { - for entry in pages_indexes_value.split(',') { - if let Ok(page_index) = entry.parse::() { - page_indexes.insert(page_index); - } + let pages_indexes_value = value.to_string(); + for entry in pages_indexes_value.split(',') { + if let Ok(page_index) = entry.parse::() { + page_indexes.insert(page_index); } } } diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 517105515c..b67b6166aa 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ 
b/stackslib/src/net/api/postblock_proposal.rs @@ -343,6 +343,17 @@ impl NakamotoBlockProposal { sortdb: &SortitionDB, chainstate: &mut StacksChainState, // not directly used; used as a handle to open other chainstates ) -> Result { + #[cfg(any(test, feature = "testing"))] + { + if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Block validation is stalled due to testing directive."); + while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."); + } + } let ts_start = get_epoch_time_ms(); // Measure time from start of function let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); @@ -533,24 +544,6 @@ impl NakamotoBlockProposal { }); } - #[cfg(any(test, feature = "testing"))] - { - if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block validation is stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - } - } - info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5cf32a8a56..273c1c7335 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3069,6 +3069,7 @@ mod test { use std::io::prelude::*; use std::io::{Read, Write}; use std::net::{SocketAddr, SocketAddrV4}; + use std::path::PathBuf; use clarity::vm::costs::ExecutionCost; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId}; @@ -3080,6 +3081,7 @@ mod test { use super::*; use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::burnchains::burnchain::*; + use crate::burnchains::db::BurnchainDB; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::*; @@ -3123,6 +3125,8 @@ mod test { let peerdb_path = format!("{}/peers.sqlite", &test_path); let stackerdb_path = format!("{}/stackerdb.sqlite", &test_path); let chainstate_path = format!("{}/chainstate", &test_path); + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), burnchain, true).unwrap(); let mut peerdb = PeerDB::connect( &peerdb_path, @@ -3314,12 +3318,14 @@ mod test { let atlasdb = AtlasDB::connect(atlas_config, &atlasdb_path, true).unwrap(); let stackerdbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let peerdb = PeerDB::open(&peerdb_path, true).unwrap(); + let burnchain_db = burnchain.open_burnchain_db(false).unwrap(); let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let network = PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, peer_version, burnchain.clone(), @@ -3331,7 +3337,7 @@ mod test { network } - fn testing_burnchain_config() -> Burnchain { + fn testing_burnchain_config(test_name: &str) -> Burnchain { let first_burn_hash = BurnchainHeaderHash::from_hex( "0000000000000000000000000000000000000000000000000000000000000000", ) @@ -3342,7 +3348,7 @@ mod test { network_id: 0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - 
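The relocated hunk above moves the `TEST_VALIDATE_STALL` gate to the very top of proposal validation, so tests can stall a proposal before any validation work begins instead of after the expensive checks have already run. Below is a minimal, self-contained sketch of the gate pattern shown in the diff; the `static` initialization and the driver in `main` are assumptions for illustration (the real gate lives inside `NakamotoBlockProposal::validate` and is compiled only under `test`/`testing`):

```rust
use std::sync::Mutex;
use std::thread;
use std::time::Duration;

// Test-only switch: None / Some(false) = run normally, Some(true) = stall.
// (The real code initializes this lazily inside stackslib.)
static TEST_VALIDATE_STALL: Mutex<Option<bool>> = Mutex::new(None);

fn validate_block() {
    // Check once before the loop so the warning is not logged on every poll.
    if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) {
        println!("Block validation is stalled due to testing directive.");
        while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) {
            thread::sleep(Duration::from_millis(10));
        }
        println!("Block validation is no longer stalled due to testing directive.");
    }
    // ... the actual validation work would start here ...
}

fn main() {
    *TEST_VALIDATE_STALL.lock().unwrap() = Some(true);
    let validator = thread::spawn(validate_block);
    thread::sleep(Duration::from_millis(50));
    *TEST_VALIDATE_STALL.lock().unwrap() = Some(false); // release the gate
    validator.join().unwrap();
}
```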
working_dir: "/nope".to_string(), + working_dir: format!("/tmp/stacks-test-databases-{}", test_name), consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_height: 12300, @@ -3366,8 +3372,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view_1 = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3397,10 +3401,13 @@ mod test { &peer_2_rc_consensus_hash ); + let burnchain_1 = testing_burnchain_config(&test_name_1); + let burnchain_2 = testing_burnchain_config(&test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3411,7 +3418,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3422,7 +3429,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3431,7 +3438,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3445,7 +3452,7 @@ mod test { peerdb_1 .update_local_peer( 0x9abcdef0, - burnchain.network_id, + burnchain_1.network_id, local_peer_1.data_url, local_peer_1.port, &[ @@ -3458,7 +3465,7 @@ mod test { peerdb_2 .update_local_peer( 0x9abcdef0, - burnchain.network_id, + burnchain_2.network_id, local_peer_2.data_url, local_peer_2.port, &[ @@ -3490,7 +3497,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3500,7 +3507,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3708,8 +3715,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3723,10 +3728,13 @@ mod test { let test_name_1 = "convo_handshake_accept_1"; let test_name_2 = "convo_handshake_accept_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3737,7 +3745,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3748,7 +3756,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3757,7 +3765,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3771,7 +3779,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3781,7 +3789,7 @@ mod test { let mut 
convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3887,8 +3895,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3902,10 +3908,13 @@ mod test { let test_name_1 = "convo_handshake_reject_1"; let test_name_2 = "convo_handshake_reject_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3916,7 +3925,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3927,7 +3936,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3936,7 +3945,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3950,7 +3959,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3960,7 +3969,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4026,8 +4035,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4046,10 +4053,13 @@ mod test { let test_name_1 = "convo_handshake_badsignature_1"; let test_name_2 = "convo_handshake_badsignature_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4060,7 +4070,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4071,7 +4081,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4080,7 +4090,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4094,7 +4104,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4104,7 +4114,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4169,8 +4179,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4189,10 +4197,13 @@ mod test { let test_name_1 = "convo_handshake_badpeeraddress_1"; let test_name_2 = "convo_handshake_badpeeraddress_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) 
= make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4203,7 +4214,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4214,7 +4225,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4223,7 +4234,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4237,7 +4248,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4247,7 +4258,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4330,8 +4341,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4345,10 +4354,13 @@ mod test { let test_name_1 = "convo_handshake_update_key_1"; let test_name_2 = "convo_handshake_update_key_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4359,7 +4371,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4370,7 +4382,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4379,7 +4391,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4393,7 +4405,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4403,7 +4415,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4523,8 +4535,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4543,10 +4553,13 @@ mod test { let test_name_1 = "convo_handshake_self_1"; let test_name_2 = "convo_handshake_self_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4557,7 +4570,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4568,7 +4581,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4577,7 +4590,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4591,7 +4604,7 @@ mod test { 
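The repeated `testing_burnchain_config(test_name)` substitutions throughout these test hunks all serve one purpose: each peer gets its own burnchain working directory, so the `BurnchainDB` that `db_setup` now opens cannot collide with another concurrently running test. A sketch of the naming scheme, with a stand-in `Burnchain` struct rather than the real stackslib type:

```rust
// Stand-in for the stackslib Burnchain type, reduced to the field at issue.
struct Burnchain {
    working_dir: String,
}

fn testing_burnchain_config(test_name: &str) -> Burnchain {
    Burnchain {
        // One directory per test (and per peer within a test), replacing the
        // old shared "/nope" placeholder that could never be opened on disk.
        working_dir: format!("/tmp/stacks-test-databases-{test_name}"),
    }
}

fn main() {
    let b1 = testing_burnchain_config("convo_handshake_accept_1");
    let b2 = testing_burnchain_config("convo_handshake_accept_2");
    assert_ne!(b1.working_dir, b2.working_dir);
}
```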
let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4601,7 +4614,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4666,8 +4679,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4686,10 +4697,13 @@ mod test { let test_name_1 = "convo_ping_1"; let test_name_2 = "convo_ping_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4700,7 +4714,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4711,7 +4725,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4720,7 +4734,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4734,7 +4748,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4744,7 +4758,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4841,8 +4855,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4861,10 +4873,13 @@ mod test { let test_name_1 = "convo_handshake_ping_loop_1"; let test_name_2 = "convo_handshake_ping_loop_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4875,7 +4890,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4886,7 +4901,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4895,7 +4910,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4909,7 +4924,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4919,7 +4934,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5067,8 +5082,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5087,10 +5100,13 @@ mod test { let test_name_1 = "convo_nack_unsolicited_1"; let test_name_2 = "convo_nack_unsolicited_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = 
testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5101,7 +5117,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5112,7 +5128,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5121,7 +5137,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5135,7 +5151,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5145,7 +5161,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5216,8 +5232,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5235,10 +5249,14 @@ mod test { let test_name_1 = "convo_ignore_unsolicited_handshake_1"; let test_name_2 = "convo_ignore_unsolicited_handshake_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5249,7 +5267,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5260,7 +5278,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5269,7 +5287,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5283,7 +5301,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5293,7 +5311,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5390,8 +5408,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5404,10 +5420,14 @@ mod test { let test_name_1 = "convo_handshake_getblocksinv_1"; let test_name_2 = "convo_handshake_getblocksinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5418,7 +5438,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5429,7 +5449,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5438,7 +5458,7 @@ mod 
test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5452,7 +5472,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5462,7 +5482,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5667,8 +5687,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5681,10 +5699,14 @@ mod test { let test_name_1 = "convo_handshake_getnakamotoinv_1"; let test_name_2 = "convo_handshake_getnakamotoinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5695,7 +5717,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5706,7 +5728,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5715,7 +5737,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5729,7 +5751,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5739,7 +5761,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5940,8 +5962,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5959,10 +5979,14 @@ mod test { let test_name_1 = "convo_natpunch_1"; let test_name_2 = "convo_natpunch_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12352, "http://peer1.com".into(), @@ -5973,7 +5997,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12353, "http://peer2.com".into(), @@ -5984,7 +6008,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5993,7 +6017,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -6007,7 +6031,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -6017,7 +6041,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -6081,8 +6105,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6094,6 +6116,8 @@ mod test { 
chain_view.make_test_data(); let test_name_1 = "convo_is_preamble_valid"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, chainstate_1) = make_test_chain_dbs( test_name_1, @@ -6362,7 +6386,7 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); + let burnchain = testing_burnchain_config("unused"); let mut chain_view = BurnchainView { burn_block_height: 12348, @@ -6748,8 +6772,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6761,6 +6783,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_relay_forward_message_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6866,8 +6890,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6879,6 +6901,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_and_forward_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6933,8 +6957,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6946,6 +6968,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_block_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7067,8 +7091,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7080,6 +7102,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_transaction_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7201,8 +7225,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7214,6 +7236,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_microblocks_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7335,8 +7359,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7348,6 +7370,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_stackerdb_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 85fe9d7494..4eeec0daaf 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -474,6 +474,8 @@ 
pub struct ConnectionOptions { /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. pub force_nakamoto_epoch_transition: bool, + /// Reject blocks that were pushed + pub reject_blocks_pushed: bool, // test facilitation /// Do not require that an unsolicited message originate from an authenticated, connected /// neighbor @@ -583,6 +585,7 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_sync: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, + reject_blocks_pushed: false, // no test facilitations on by default test_disable_unsolicited_message_authentication: false, diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 02ed8b9419..42d228aca1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -68,6 +68,9 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +/// How often to check for unconfirmed tenures +const CHECK_UNCONFIRMED_TENURES_MS: u128 = 1_000; + /// The overall downloader can operate in one of two states: /// * it's doing IBD, in which case it's downloading tenures using neighbor inventories and /// the start/end block ID hashes obtained from block-commits. This works up until the last two @@ -118,6 +121,10 @@ pub struct NakamotoDownloadStateMachine { pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip nakamoto_tip: StacksBlockId, + /// do we need to fetch unconfirmed tenures? + fetch_unconfirmed_tenures: bool, + /// last time unconfirmed tenures were checked + last_unconfirmed_download_check_ms: u128, /// last time an unconfirmed downloader was run last_unconfirmed_download_run_ms: u128, } @@ -139,6 +146,8 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, + fetch_unconfirmed_tenures: false, + last_unconfirmed_download_check_ms: 0, last_unconfirmed_download_run_ms: 0, } } @@ -465,142 +474,6 @@ impl NakamotoDownloadStateMachine { Ok(()) } - /// Determine if the set of `TenureStartEnd`s represents available but unfetched data. Used to - /// determine whether or not to update the set of wanted tenures -- we don't want to skip - /// fetching wanted tenures if they're still available! - pub(crate) fn have_unprocessed_tenures<'a>( - first_nakamoto_rc: u64, - completed_tenures: &HashSet<ConsensusHash>, - prev_wanted_tenures: &[WantedTenure], - tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator<Item = &'a NakamotoTenureInv>, - ) -> bool { - if prev_wanted_tenures.is_empty() { - debug!("prev_wanted_tenures is empty, so we have unprocessed tenures"); - return true; - } - - // the anchor block for prev_wanted_tenures must not only be processed, but also we have to - // have seen an inventory message from the subsequent reward cycle.
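The new `fetch_unconfirmed_tenures` flag and `last_unconfirmed_download_check_ms` timestamp introduced above implement a rate limit that is wired up later in `run_downloads`: the `need_unconfirmed_tenures` scan now runs at most once per `CHECK_UNCONFIRMED_TENURES_MS`, and polls inside that window simply report false. A reduced sketch of the throttle, with a hypothetical `expensive_check` closure standing in for the scan:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// How often the expensive scan may run (mirrors the diff's constant).
const CHECK_UNCONFIRMED_TENURES_MS: u128 = 1_000;

fn get_epoch_time_ms() -> u128 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time before epoch")
        .as_millis()
}

struct Throttle {
    last_check_ms: u128,
}

impl Throttle {
    /// Run `expensive_check` only if the window has elapsed; inside the
    /// window, report `false` without scanning, just as `run_downloads`
    /// now leaves `fetch_unconfirmed_tenures` false when throttled.
    fn poll(&mut self, expensive_check: impl Fn() -> bool) -> bool {
        if self
            .last_check_ms
            .saturating_add(CHECK_UNCONFIRMED_TENURES_MS)
            > get_epoch_time_ms()
        {
            return false;
        }
        let result = expensive_check();
        self.last_check_ms = get_epoch_time_ms();
        result
    }
}

fn main() {
    let mut throttle = Throttle { last_check_ms: 0 };
    assert!(throttle.poll(|| true)); // first poll runs the scan
    assert!(!throttle.poll(|| true)); // polls inside the window are skipped
}
```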
If we can see - // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be - // true - let prev_wanted_rc = prev_wanted_tenures - .last() - .map(|wt| { - downloader_block_height_to_reward_cycle( - pox_constants, - first_burn_height, - wt.burn_height, - ) - .expect("FATAL: wanted tenure before system start") - }) - .unwrap_or(u64::MAX); - - let cur_wanted_rc = prev_wanted_rc.saturating_add(1); - - debug!( - "have_unprocessed_tenures: prev_wanted_rc = {}, cur_wanted_rc = {}", - prev_wanted_rc, cur_wanted_rc - ); - - let mut has_prev_inv = false; - let mut has_cur_inv = false; - let mut num_invs = 0; - for inv in inventory_iter { - num_invs += 1; - if prev_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_prev_inv = true; - } else if inv.tenures_inv.get(&prev_wanted_rc).is_some() { - has_prev_inv = true; - } - - if cur_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_cur_inv = true; - } else if inv.tenures_inv.get(&cur_wanted_rc).is_some() { - has_cur_inv = true; - } - } - - if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures. Total inventories: {}", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv, num_invs); - return true; - } - - // the state machine updates `tenure_block_ids` _after_ `wanted_tenures`, so verify that - // this isn't a stale `tenure_block_ids` by checking that it contains at least one block in - // the prev_wanted_rc and at least one in the cur_wanted_rc - let mut has_prev_rc_block = false; - let mut has_cur_rc_block = false; - let mut available_considered = 0; - for (_naddr, available) in tenure_block_ids.iter() { - available_considered += available.len(); - debug!("Consider available tenures from {}", _naddr); - for (_ch, tenure_info) in available.iter() { - debug!("Consider tenure info for {}: {:?}", _ch, tenure_info); - if tenure_info.start_reward_cycle == prev_wanted_rc - || tenure_info.end_reward_cycle == prev_wanted_rc - { - has_prev_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in prev reward cycle {}", - _ch, prev_wanted_rc - ); - } - if tenure_info.start_reward_cycle == cur_wanted_rc - || tenure_info.end_reward_cycle == cur_wanted_rc - { - has_cur_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in cur reward cycle {}", - _ch, cur_wanted_rc - ); - } - } - } - - if available_considered > 0 - && ((prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block)) - { - debug!( - "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})", - prev_wanted_rc, - has_prev_rc_block, - cur_wanted_rc, - has_cur_rc_block, - ); - return true; - } - - let mut ret = false; - for (_naddr, available) in tenure_block_ids.iter() { - for wt in prev_wanted_tenures.iter() { - let Some(tenure_info) = available.get(&wt.tenure_id_consensus_hash) else { - continue; - }; - if completed_tenures.contains(&tenure_info.tenure_id_consensus_hash) { - // this check is necessary because the check for .processed requires that a - // child tenure block has been processed, which isn't guaranteed at a reward - // cycle boundary - debug!("Tenure {:?} has been fully downloaded", &tenure_info); - continue; - } - if !tenure_info.processed { - debug!( - "Tenure {:?} is available from {} but not processed", - &tenure_info, &_naddr - ); - ret = true; - } - } - } - 
ret - } - /// Update the state machine's wanted tenures and processed tenures, if it's time to do so. /// This will only happen when the sortition DB has finished processing a reward cycle of /// tenures when in IBD mode, _OR_ when the sortition tip advances when in steady-state mode. @@ -612,8 +485,7 @@ impl NakamotoDownloadStateMachine { /// cycle boundaries, where the sortition DB is about to begin processing a new reward cycle. /// The list of wanted tenures for the current reward cycle will be saved as /// `self.prev_wanted_tenures`, and the set of wanted tenures for the next reward cycle - /// will be stored to `self.wanted_tenures`. It will only update these two lists if it is safe - /// to do so, as determined by `have_unprocessed_tenures()`. + /// will be stored to `self.wanted_tenures`. /// /// In the second case (i.e. not a reward cycle boundary), this function will load up _new_ /// wanted tenure data and append it to `self.wanted_tenures` via @@ -1355,6 +1227,7 @@ impl NakamotoDownloadStateMachine { ) { Ok(blocks_opt) => blocks_opt, Err(NetError::StaleView) => { + neighbor_rpc.add_dead(network, &naddr); continue; } Err(e) => { @@ -1545,16 +1418,19 @@ impl NakamotoDownloadStateMachine { chainstate: &StacksChainState, ibd: bool, ) -> HashMap> { - debug!("NakamotoDownloadStateMachine in state {}", &self.state); + debug!( + "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", + burnchain_height, network.burnchain_tip.block_height, &self.state; + "has_network_inventories" => network.inv_state_nakamoto.is_some(), + "next_unconfirmed_check" => self.last_unconfirmed_download_check_ms.saturating_add(CHECK_UNCONFIRMED_TENURES_MS) / 1000, + "timestamp_ms" => get_epoch_time_ms(), + ); + let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - debug!("No network inventories"); return HashMap::new(); }; - debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", - burnchain_height, network.burnchain_tip.block_height, &self.state - ); + self.update_available_tenures( &invs.inventories, &sortdb.pox_constants, @@ -1563,14 +1439,24 @@ impl NakamotoDownloadStateMachine { ); // check this now, since we mutate self.available - let need_unconfirmed_tenures = Self::need_unconfirmed_tenures( - burnchain_height, - &network.burnchain_tip, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &self.available_tenures, - ); + self.fetch_unconfirmed_tenures = if self + .last_unconfirmed_download_check_ms + .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) + > get_epoch_time_ms() + { + false + } else { + let do_fetch = Self::need_unconfirmed_tenures( + burnchain_height, + &network.burnchain_tip, + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.tenure_block_ids, + &self.available_tenures, + ); + self.last_unconfirmed_download_check_ms = get_epoch_time_ms(); + do_fetch + }; match self.state { NakamotoDownloadState::Confirmed => { @@ -1580,7 +1466,7 @@ impl NakamotoDownloadStateMachine { .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); - if self.tenure_downloads.is_empty() && need_unconfirmed_tenures { + if self.tenure_downloads.is_empty() && self.fetch_unconfirmed_tenures { debug!( "Transition from {} to {}", &self.state, @@ -1625,7 +1511,7 @@ impl NakamotoDownloadStateMachine { } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { - if 
need_unconfirmed_tenures { + if self.fetch_unconfirmed_tenures { // do this again self.unconfirmed_tenure_download_schedule = Self::make_unconfirmed_tenure_download_schedule( diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 53f9105156..ba1ac81033 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -98,6 +98,8 @@ impl WantedTenure { pub struct TenureStartEnd { /// Consensus hash that identifies the start of the tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Burnchain block height of tenure ID consensus hash + pub tenure_id_burn_block_height: u64, /// Tenure-start block ID pub start_block_id: StacksBlockId, /// Last block ID @@ -119,6 +121,7 @@ pub type AvailableTenures = HashMap; impl TenureStartEnd { pub fn new( tenure_id_consensus_hash: ConsensusHash, + tenure_id_burn_block_height: u64, start_block_id: StacksBlockId, end_block_id: StacksBlockId, start_reward_cycle: u64, @@ -127,6 +130,7 @@ impl TenureStartEnd { ) -> Self { Self { tenure_id_consensus_hash, + tenure_id_burn_block_height, start_block_id, end_block_id, start_reward_cycle, @@ -214,6 +218,7 @@ impl TenureStartEnd { let tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, @@ -322,6 +327,7 @@ impl TenureStartEnd { let mut tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, wt_start.winning_block_id.clone(), wt_end.winning_block_id.clone(), rc, diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 49b32c2634..b5514558b8 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -67,6 +67,35 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct CompletedTenure { + tenure_id: ConsensusHash, + start_block: StacksBlockId, + end_block: StacksBlockId, +} + +impl From<&TenureStartEnd> for CompletedTenure { + fn from(tse: &TenureStartEnd) -> Self { + Self { + tenure_id: tse.tenure_id_consensus_hash.clone(), + start_block: tse.start_block_id.clone(), + end_block: tse.end_block_id.clone(), + } + } +} + +impl From<&mut NakamotoTenureDownloader> for CompletedTenure { + fn from(ntd: &mut NakamotoTenureDownloader) -> Self { + Self { + tenure_id: ntd.tenure_id_consensus_hash, + start_block: ntd.tenure_start_block_id, + end_block: ntd.tenure_end_block_id, + } + } +} + +pub const PEER_DEPRIORITIZATION_TIME_SECS: u64 = 60; + /// A set of confirmed downloader state machines assigned to one or more neighbors. The block /// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure /// needs to feed data into the Nth tenure. 
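`CompletedTenure` replaces the bare `ConsensusHash` previously used to record finished downloads. Identifying a completed tenure by its consensus hash plus its start and end block IDs means that if the downloader's view of a tenure's boundaries changes, the stale completion record no longer matches and the tenure can be fetched again. A reduced sketch of why the richer key matters, with stand-in types for `ConsensusHash` and `StacksBlockId`:

```rust
use std::collections::HashSet;

// Stand-ins for ConsensusHash / StacksBlockId.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Ch(u8);
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct BlockId(u8);

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct CompletedTenure {
    tenure_id: Ch,
    start_block: BlockId,
    end_block: BlockId,
}

fn main() {
    let mut completed: HashSet<CompletedTenure> = HashSet::new();
    completed.insert(CompletedTenure {
        tenure_id: Ch(1),
        start_block: BlockId(10),
        end_block: BlockId(20),
    });

    // Same tenure ID, but the downloader now believes the tenure ends at a
    // different block. Keyed only by Ch(1) this would be skipped as "done";
    // keyed by the full triple it is correctly treated as not yet downloaded.
    let revised = CompletedTenure {
        tenure_id: Ch(1),
        start_block: BlockId(10),
        end_block: BlockId(21),
    };
    assert!(!completed.contains(&revised));
}
```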
This struct is responsible for scheduling peer @@ -83,7 +112,14 @@ pub struct NakamotoTenureDownloaderSet { pub(crate) peers: HashMap<NeighborAddress, usize>, /// The set of tenures that have been successfully downloaded (but possibly not yet stored or /// processed) - pub(crate) completed_tenures: HashSet<ConsensusHash>, + pub(crate) completed_tenures: HashSet<CompletedTenure>, + /// Number of times a tenure download was attempted + pub(crate) attempted_tenures: HashMap<ConsensusHash, u64>, + /// Number of times a tenure download failed + pub(crate) attempt_failed_tenures: HashMap<ConsensusHash, u64>, + /// Peers that should be deprioritized because they're dead (maps to when they can be used + /// again) + pub(crate) deprioritized_peers: HashMap<NeighborAddress, u64>, } impl NakamotoTenureDownloaderSet { @@ -92,15 +128,51 @@ downloaders: vec![], peers: HashMap::new(), completed_tenures: HashSet::new(), + attempted_tenures: HashMap::new(), + attempt_failed_tenures: HashMap::new(), + deprioritized_peers: HashMap::new(), } } + /// Mark a tenure as having failed to download. + /// Implemented statically to appease the borrow checker. + fn mark_failure(attempt_failed_tenures: &mut HashMap<ConsensusHash, u64>, ch: &ConsensusHash) { + if let Some(failures) = attempt_failed_tenures.get_mut(ch) { + *failures = (*failures).saturating_add(1); + } else { + attempt_failed_tenures.insert(ch.clone(), 1); + } + } + + /// Mark a peer as deprioritized + /// Implemented statically to appease the borrow checker. + fn mark_deprioritized( + deprioritized_peers: &mut HashMap<NeighborAddress, u64>, + peer: &NeighborAddress, + ) { + deprioritized_peers.insert( + peer.clone(), + get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME_SECS, + ); + } + + /// Mark a peer and its tenure as dead and failed + fn mark_failed_and_deprioritize_peer( + attempted_failed_tenures: &mut HashMap<ConsensusHash, u64>, + deprioritized_peers: &mut HashMap<NeighborAddress, u64>, + ch: &ConsensusHash, + peer: &NeighborAddress, + ) { + Self::mark_failure(attempted_failed_tenures, ch); + Self::mark_deprioritized(deprioritized_peers, peer); + } + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr + "Add downloader for tenure {} driven by {naddr}", + &downloader.tenure_id_consensus_hash ); if let Some(idx) = self.peers.get(&naddr) { self.downloaders[*idx] = Some(downloader); @@ -154,7 +226,7 @@ ) { for (naddr, downloader) in iter { if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); + debug!("Already have downloader for {naddr}"); continue; } self.add_downloader(naddr, downloader); @@ -180,15 +252,6 @@ cnt } - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } -
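The `mark_failure`/`mark_deprioritized` helpers above maintain two pieces of bookkeeping: a per-tenure failure counter and a per-peer cooldown stored as the epoch second at which the peer may be used again, which the scheduler consults before assigning work. A compact sketch of the same bookkeeping, with `String` keys standing in for `ConsensusHash` and `NeighborAddress`:

```rust
use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};

/// Cooldown applied to a peer after a failed download (from the diff).
const PEER_DEPRIORITIZATION_TIME_SECS: u64 = 60;

fn get_epoch_time_secs() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system time before epoch")
        .as_secs()
}

/// Count a failed download attempt for a tenure (saturating, as in the diff).
fn mark_failure(failed: &mut HashMap<String, u64>, tenure: &str) {
    let count = failed.entry(tenure.to_string()).or_insert(0);
    *count = count.saturating_add(1);
}

/// Record the epoch second at which the peer may be scheduled again.
fn mark_deprioritized(cooldowns: &mut HashMap<String, u64>, peer: &str) {
    cooldowns.insert(
        peer.to_string(),
        get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME_SECS,
    );
}

/// The scheduler's pre-assignment check: absent entries map to 0, i.e. usable.
fn peer_usable(cooldowns: &HashMap<String, u64>, peer: &str) -> bool {
    get_epoch_time_secs() >= *cooldowns.get(peer).unwrap_or(&0)
}

fn main() {
    let mut failed = HashMap::new();
    let mut cooldowns = HashMap::new();
    mark_failure(&mut failed, "tenure-ch-1");
    mark_deprioritized(&mut cooldowns, "peer-a:20444");
    assert!(!peer_usable(&cooldowns, "peer-a:20444")); // still cooling down
    assert!(peer_usable(&cooldowns, "peer-b:20444")); // never penalized
}
```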
/// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. pub fn is_empty(&self) -> bool { for downloader_opt in self.downloaders.iter() { @@ -218,8 +281,8 @@ }; debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash + "Peer {naddr} already bound to downloader for {}", + &_downloader.tenure_id_consensus_hash ); return true; } @@ -231,8 +294,8 @@ continue; } debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Assign peer {naddr} to work on downloader for {} in state {}", + &downloader.tenure_id_consensus_hash, &downloader.state ); downloader.naddr = naddr.clone(); self.peers.insert(naddr, i); @@ -251,15 +314,15 @@ idled.push(naddr.clone()); continue; }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); + let Some(downloader) = downloader_opt.as_ref() else { + debug!("Remove peer {naddr} for null download {i}"); idled.push(naddr.clone()); continue; }; if downloader.idle { debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash + "Remove idled peer {naddr} for tenure download {}", + &downloader.tenure_id_consensus_hash ); idled.push(naddr.clone()); } @@ -273,10 +336,12 @@ /// this up with a call to `clear_available_peers()`. pub fn clear_finished_downloaders(&mut self) { for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { + // clear the downloader if it's done by setting it to None + if downloader_opt + .as_ref() + .map(|dl| dl.is_done()) + .unwrap_or(false) + { *downloader_opt = None; } } @@ -306,8 +371,8 @@ }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, state={}, naddr={})", - tenure_id, downloader.idle, &downloader.state, &downloader.naddr + "Have downloader for tenure {tenure_id} already (idle={}, state={}, naddr={})", + downloader.idle, &downloader.state, &downloader.naddr ); return true; } @@ -337,32 +402,35 @@ self.clear_finished_downloaders(); self.clear_available_peers(); - while self.inflight() < count { + while self.num_scheduled_downloaders() < count { let Some(ch) = schedule.front() else { break; }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); + debug!("No neighbors have tenure {ch}"); schedule.pop_front(); continue; }; if neighbors.is_empty() { // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; } let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; }; + if get_epoch_time_secs() < *self.deprioritized_peers.get(&naddr).unwrap_or(&0) { + debug!( + "Peer {naddr} is deprioritized until {}", + self.deprioritized_peers.get(&naddr).unwrap_or(&0) + ); + continue; + } + if self.try_resume_peer(naddr.clone()) { continue; }; @@ -373,23 +441,40 @@ impl
NakamotoTenureDownloaderSet { let Some(available_tenures) = tenure_block_ids.get(&naddr) else { // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); + debug!("No tenures available from {naddr}"); continue; }; let Some(tenure_info) = available_tenures.get(ch) else { // this peer does not have a tenure start/end block for this tenure, so try the // others. - debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + debug!("Neighbor {naddr} does not serve tenure {ch}"); continue; }; + if tenure_info.processed { + // we already have this tenure + debug!("Already have processed tenure {ch}"); + self.completed_tenures + .remove(&CompletedTenure::from(tenure_info)); + continue; + } + if self + .completed_tenures + .contains(&CompletedTenure::from(tenure_info)) + { + debug!( + "Already successfully downloaded tenure {ch} ({}-{})", + &tenure_info.start_block_id, &tenure_info.end_block_id + ); + schedule.pop_front(); + continue; + } let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {tenure_info:?}", tenure_info.start_reward_cycle, - &tenure_info ); schedule.pop_front(); continue; @@ -399,28 +484,33 @@ impl NakamotoTenureDownloaderSet { .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {tenure_info:?}", tenure_info.end_reward_cycle, - &tenure_info ); schedule.pop_front(); continue; }; - info!("Download tenure {}", &ch; + let attempt_count = *self.attempted_tenures.get(&ch).unwrap_or(&0); + self.attempted_tenures + .insert(ch.clone(), attempt_count.saturating_add(1)); + + let attempt_failed_count = *self.attempt_failed_tenures.get(&ch).unwrap_or(&0); + + info!("Download tenure {ch}"; + "peer" => %naddr, + "attempt" => attempt_count.saturating_add(1), + "failed" => attempt_failed_count, + "downloads_scheduled" => %self.num_scheduled_downloaders(), + "downloads_total" => %self.num_downloaders(), + "downloads_max_count" => count, + "downloads_inflight" => self.inflight(), "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, - "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); + "tenure_end_reward_cycle" => tenure_info.end_reward_cycle, + "tenure_burn_height" => tenure_info.tenure_id_burn_block_height); - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); let tenure_download = NakamotoTenureDownloader::new( ch.clone(), tenure_info.start_block_id.clone(), @@ -430,7 +520,7 @@ impl NakamotoTenureDownloaderSet { end_reward_set.clone(), ); - debug!("Request tenure {} from neighbor {}", ch, &naddr); + debug!("Request tenure {ch} from neighbor {naddr}"); self.add_downloader(naddr, tenure_download); schedule.pop_front(); } @@ -459,28 +549,37 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); + debug!("Peer {naddr} 
has an inflight request"); continue; } let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; if downloader.is_done() { debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for {naddr} on tenure {} is finished", + &downloader.tenure_id_consensus_hash ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; } debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Send request to {naddr} for tenure {} (state {})", + &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); + info!( + "Downloader for tenure {} to {naddr} failed; this peer is dead", + &downloader.tenure_id_consensus_hash, + ); + Self::mark_failed_and_deprioritize_peer( + &mut self.attempt_failed_tenures, + &mut self.deprioritized_peers, + &downloader.tenure_id_consensus_hash, + naddr, + ); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -494,12 +593,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(&naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) 
{ @@ -509,23 +608,35 @@ impl NakamotoTenureDownloaderSet { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; - debug!("Got response from {}", &naddr); + debug!("Got response from {naddr}"); let Ok(blocks_opt) = downloader .handle_next_download_response(response) .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); + info!( + "Failed to handle response from {naddr} on tenure {}: {e}", + &downloader.tenure_id_consensus_hash, + ); e }) else { - debug!("Failed to handle download response from {}", &naddr); + debug!( + "Failed to handle download response from {naddr} on tenure {}", + &downloader.tenure_id_consensus_hash + ); + Self::mark_failed_and_deprioritize_peer( + &mut self.attempt_failed_tenures, + &mut self.deprioritized_peers, + &downloader.tenure_id_consensus_hash, + &naddr, + ); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -541,12 +652,16 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { + info!( + "Downloader for tenure {} is finished", + &downloader.tenure_id_consensus_hash + ); debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for tenure {} finished on {naddr}", + &downloader.tenure_id_consensus_hash, ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; } } @@ -554,12 +669,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3f4fcb6165..d5b08f56d2 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -407,16 +407,13 @@ impl InvGenerator { let cur_sortition_info = self.get_sortition_info(sortdb, &cur_consensus_hash)?; let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash; - debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, parent_sortition_consensus_hash = {}", cur_height, &cur_consensus_hash, &cur_tenure_opt, &parent_sortition_consensus_hash); + trace!("Get sortition and tenure info for height {cur_height}. cur_consensus_hash = {cur_consensus_hash}, cur_tenure_info = {cur_tenure_opt:?}, parent_sortition_consensus_hash = {parent_sortition_consensus_hash}"); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... 
if cur_tenure_info.tenure_id_consensus_hash == cur_consensus_hash { // ...and this tenure started in this sortition - debug!( - "Tenure was started for {} (height {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was started for {cur_consensus_hash} (height {cur_height})"); tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -426,19 +423,13 @@ impl InvGenerator { )?; } else { // ...but this tenure did not start in this sortition - debug!( - "Tenure was NOT started for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was NOT started for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); } } else { // no active tenure during this sortition. Check the parent sortition to see if a // tenure began there. - debug!( - "No winning sortition for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("No winning sortition for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -457,9 +448,9 @@ impl InvGenerator { } tenure_status.reverse(); - debug!( - "Tenure bits off of {} and {}: {:?}", - nakamoto_tip, &tip.consensus_hash, &tenure_status + trace!( + "Tenure bits off of {nakamoto_tip} and {}: {tenure_status:?}", + &tip.consensus_hash ); Ok(tenure_status) } @@ -579,10 +570,10 @@ impl NakamotoTenureInv { /// Reset synchronization state for this peer. Don't remove inventory data; just make it so we /// can talk to the peer again - pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, cur_rc: u64) { + pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, max_rc: u64) { let now = get_epoch_time_secs(); if self.start_sync_time + inv_sync_interval <= now - && (self.cur_reward_cycle >= cur_rc || !self.online) + && (self.cur_reward_cycle >= max_rc || !self.online) { self.reset_comms(start_rc); } @@ -618,20 +609,20 @@ impl NakamotoTenureInv { pub fn getnakamotoinv_begin( &mut self, network: &mut PeerNetwork, - current_reward_cycle: u64, + max_reward_cycle: u64, ) -> bool { debug!( "{:?}: Begin Nakamoto inventory sync for {} in cycle {}", network.get_local_peer(), self.neighbor_address, - current_reward_cycle, + max_reward_cycle, ); // possibly reset communications with this peer, if it's time to do so. self.try_reset_comms( network.get_connection_opts().inv_sync_interval, - current_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), - current_reward_cycle, + max_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), + max_reward_cycle, ); if !self.is_online() { // don't talk to this peer for now @@ -643,7 +634,7 @@ impl NakamotoTenureInv { return false; } - if self.reward_cycle() > current_reward_cycle { + if self.reward_cycle() > max_reward_cycle { // we've fully sync'ed with this peer debug!( "{:?}: fully sync'ed: {}", @@ -908,10 +899,24 @@ impl NakamotoInvStateMachine { ) }); - // try to get all of the reward cycles we know about, plus the next one. We try to get - // the next one as well in case we're at a reward cycle boundary, but we're not at the - // chain tip -- the block downloader still needs that next inventory to proceed. 
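The replacement logic just below swaps this blanket "+1" for a cap derived from the burnchain tip's reward cycle. A minimal free-standing sketch of the selection rule (the helper name and signature are assumptions for illustration, not part of the patch):

// Sketch: pick the highest reward cycle to request inventories for.
// `current_rc` is the Stacks tip's reward cycle; `burnchain_tip_rc` is the
// reward cycle of the highest burnchain block we have observed.
fn max_inv_reward_cycle(current_rc: u64, burnchain_tip_rc: u64) -> u64 {
    if burnchain_tip_rc > current_rc {
        // the burnchain crossed a cycle boundary ahead of the Stacks tip, so
        // the block downloader still needs the next cycle's inventory
        current_rc.saturating_add(1)
    } else {
        current_rc
    }
}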
- let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle.saturating_add(1)); + let burnchain_tip_reward_cycle = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + network.stacks_tip.burnchain_height, + ) + .ok_or(NetError::ChainstateError( + "block height comes before system start".into(), + ))?; + + let max_reward_cycle = if burnchain_tip_reward_cycle > current_reward_cycle { + // try to sync up to the next reward cycle + current_reward_cycle.saturating_add(1) + } else { + current_reward_cycle + }; + + let proceed = inv.getnakamotoinv_begin(network, max_reward_cycle); let inv_rc = inv.reward_cycle(); new_inventories.insert(naddr.clone(), inv); @@ -946,6 +951,7 @@ impl NakamotoInvStateMachine { "peer" => ?naddr, "error" => ?e ); + continue; } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2210160bee..87c4c0bf06 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -16,7 +16,6 @@ #[warn(unused_imports)] use std::collections::HashMap; -#[cfg(any(test, feature = "testing"))] use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::io::prelude::*; @@ -1466,7 +1465,7 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct NetworkResult { /// Stacks chain tip when we began this pass pub stacks_tip: StacksBlockId, @@ -1516,6 +1515,10 @@ pub struct NetworkResult { pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, + /// The observed stacks coinbase height + pub coinbase_height: u64, + /// The observed stacks tip height (different in Nakamoto from coinbase height) + pub stacks_tip_height: u64, /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs pub stacker_db_configs: HashMap<QualifiedContractIdentifier, StackerDBConfig>, @@ -1530,6 +1533,8 @@ impl NetworkResult { num_download_passes: u64, num_connected_peers: usize, burn_height: u64, + coinbase_height: u64, + stacks_tip_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap<QualifiedContractIdentifier, StackerDBConfig>, ) -> NetworkResult { @@ -1558,11 +1563,509 @@ impl NetworkResult { num_download_passes: num_download_passes, num_connected_peers, burn_height, + coinbase_height, + stacks_tip_height, rc_consensus_hash, stacker_db_configs, } } + /// Get the set of all StacksBlocks represented + fn all_block_ids(&self) -> HashSet<StacksBlockId> { + let mut blocks: HashSet<_> = self + .blocks + .iter() + .map(|(ch, blk, _)| StacksBlockId::new(&ch, &blk.block_hash())) + .collect(); + + let pushed_blocks: HashSet<_> = self + .pushed_blocks + .iter() + .map(|(_, block_list)| { + block_list + .iter() + .map(|block_data| { + block_data + .blocks + .iter() + .map(|block_datum| { + StacksBlockId::new(&block_datum.0, &block_datum.1.block_hash()) + }) + .collect::<Vec<_>>() + }) + .flatten() + }) + .flatten() + .collect(); + + let uploaded_blocks: HashSet<_> = self + .uploaded_blocks + .iter() + .map(|blk_data| { + blk_data + .blocks + .iter() + .map(|blk| StacksBlockId::new(&blk.0, &blk.1.block_hash())) + }) + .flatten() + .collect(); + + blocks.extend(pushed_blocks.into_iter()); + blocks.extend(uploaded_blocks.into_iter()); + blocks + } + + /// Get the set of all microblocks represented + fn all_microblock_hashes(&self) -> HashSet<BlockHeaderHash> { + let mut mblocks: HashSet<_> = self + .confirmed_microblocks + .iter() + .map(|(_, mblocks, _)| mblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + 
.collect(); + + let pushed_microblocks: HashSet<_> = self + .pushed_microblocks + .iter() + .map(|(_, mblock_list)| { + mblock_list + .iter() + .map(|(_, mblock_data)| { + mblock_data + .microblocks + .iter() + .map(|mblock| mblock.block_hash()) + }) + .flatten() + }) + .flatten() + .collect(); + + let uploaded_microblocks: HashSet<_> = self + .uploaded_microblocks + .iter() + .map(|mblk_data| mblk_data.microblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + .collect(); + + mblocks.extend(pushed_microblocks.into_iter()); + mblocks.extend(uploaded_microblocks.into_iter()); + mblocks + } + + /// Get the set of all nakamoto blocks represented + fn all_nakamoto_block_ids(&self) -> HashSet<StacksBlockId> { + let mut naka_block_ids: HashSet<_> = self + .nakamoto_blocks + .iter() + .map(|(_, nblk)| nblk.block_id()) + .collect(); + + let pushed_nakamoto_blocks: HashSet<_> = self + .pushed_nakamoto_blocks + .iter() + .map(|(_, naka_blocks_list)| { + naka_blocks_list + .iter() + .map(|(_, naka_blocks)| { + naka_blocks + .blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect::<Vec<_>>() + }) + .collect::<Vec<Vec<_>>>() + }) + .collect::<Vec<Vec<Vec<_>>>>() + .into_iter() + .flatten() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let uploaded_nakamoto_blocks: HashSet<_> = self + .uploaded_nakamoto_blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect(); + + naka_block_ids.extend(pushed_nakamoto_blocks.into_iter()); + naka_block_ids.extend(uploaded_nakamoto_blocks.into_iter()); + naka_block_ids + } + + /// Get the set of all txids represented + fn all_txids(&self) -> HashSet<Txid> { + let mut txids: HashSet<_> = self + .uploaded_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + let pushed_txids: HashSet<_> = self + .pushed_transactions + .iter() + .map(|(_, tx_list)| { + tx_list + .iter() + .map(|(_, tx)| tx.txid()) + .collect::<Vec<_>>() + }) + .collect::<Vec<Vec<_>>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let synced_txids: HashSet<_> = self + .synced_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + + txids.extend(pushed_txids.into_iter()); + txids.extend(synced_txids.into_iter()); + txids + } + + /// Get all unhandled message signatures. + /// This is unique per message. + fn all_msg_sigs(&self) -> HashSet<MessageSignature> { + self.unhandled_messages + .iter() + .map(|(_, msgs)| { + msgs.iter() + .map(|msg| msg.preamble.signature.clone()) + .collect::<Vec<_>>() + }) + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }) + } + + /// Merge self into `newer`, and return `newer`. + /// Deduplicate messages when possible. 
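The `update` method below applies one merge discipline to every collection: build the set of identities `newer` already holds, drop those from `self`, then append what survives. A minimal sketch of that recurring pattern (the generic helper `merge_dedup` is illustrative only, not part of the patch):

use std::collections::HashSet;
use std::hash::Hash;

// Sketch: dedup-then-append, the shape repeated below for blocks, microblocks,
// Nakamoto blocks, transactions, and unhandled messages. `key` extracts the
// identity used for deduplication (block id, txid, hash, or signature).
fn merge_dedup<V, K: Eq + Hash>(mut older: Vec<V>, newer: &mut Vec<V>, key: impl Fn(&V) -> K) {
    let seen: HashSet<K> = newer.iter().map(&key).collect();
    older.retain(|v| !seen.contains(&key(v))); // drop anything `newer` already has
    newer.append(&mut older); // the newer copy wins on conflict
}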
+ pub fn update(mut self, mut newer: NetworkResult) -> Self { + // merge unhandled messages, but deduplicate + let newer_msgs = newer.all_msg_sigs(); + for (nk, mut msgs) in self.unhandled_messages.drain() { + msgs.retain(|msg| { + let retain = !newer_msgs.contains(&msg.preamble.signature); + if !retain { + debug!( + "Drop duplicate p2p message {} seq {}", + &msg.get_message_name(), + &msg.preamble.seq + ); + } + retain + }); + if let Some(newer_msgs) = newer.unhandled_messages.get_mut(&nk) { + newer_msgs.append(&mut msgs); + } else { + newer.unhandled_messages.insert(nk, msgs); + } + } + + let newer_blocks = newer.all_block_ids(); + let newer_microblocks = newer.all_microblock_hashes(); + let newer_naka_blocks = newer.all_nakamoto_block_ids(); + let newer_txids = newer.all_txids(); + + // only retain blocks not found in `newer` + self.blocks.retain(|(ch, blk, _)| { + let block_id = StacksBlockId::new(&ch, &blk.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate downloaded block {}", &block_id); + } + retain + }); + newer.blocks.append(&mut self.blocks); + + // merge microblocks, but deduplicate + self.confirmed_microblocks + .retain_mut(|(_, ref mut mblocks, _)| { + mblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!( + "Drop duplicate downloaded microblock {}", + &mblk.block_hash() + ); + } + retain + }); + mblocks.len() > 0 + }); + newer + .confirmed_microblocks + .append(&mut self.confirmed_microblocks); + + // merge nakamoto blocks, but deduplicate + self.nakamoto_blocks.retain(|_, nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate downloaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + newer.nakamoto_blocks.extend(self.nakamoto_blocks.drain()); + + // merge pushed transactions, but deduplicate + for (nk, mut tx_data) in self.pushed_transactions.drain() { + tx_data.retain(|(_, tx)| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate pushed transaction {}", &tx.txid()); + } + retain + }); + if tx_data.len() == 0 { + continue; + } + + if let Some(newer_tx_data) = newer.pushed_transactions.get_mut(&nk) { + newer_tx_data.append(&mut tx_data); + } else { + newer.pushed_transactions.insert(nk, tx_data); + } + } + + // merge pushed blocks, but deduplicate + for (nk, mut block_list) in self.pushed_blocks.drain() { + block_list.retain_mut(|ref mut block_data| { + block_data.blocks.retain(|blk_datum| { + let block_id = StacksBlockId::new(&blk_datum.0, &blk_datum.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate pushed block {}", &block_id); + } + retain + }); + block_data.blocks.len() > 0 + }); + if block_list.len() == 0 { + continue; + } + + if let Some(newer_block_data) = newer.pushed_blocks.get_mut(&nk) { + newer_block_data.append(&mut block_list); + } else { + newer.pushed_blocks.insert(nk, block_list); + } + } + + // merge pushed microblocks, but deduplicate + for (nk, mut microblock_data) in self.pushed_microblocks.drain() { + microblock_data.retain_mut(|(_, ref mut mblock_data)| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate pushed microblock {}", &mblk.block_hash()); + } + retain + }); + mblock_data.microblocks.len() > 0 + }); + if microblock_data.len() == 0 { + continue; + } + + if let 
Some(newer_microblock_data) = newer.pushed_microblocks.get_mut(&nk) { + newer_microblock_data.append(&mut microblock_data); + } else { + newer.pushed_microblocks.insert(nk, microblock_data); + } + } + + // merge pushed nakamoto blocks, but deduplicate + for (nk, mut nakamoto_block_data) in self.pushed_nakamoto_blocks.drain() { + nakamoto_block_data.retain_mut(|(_, ref mut naka_blocks)| { + naka_blocks.blocks.retain(|nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!("Drop duplicate pushed nakamoto block {}", &nblk.block_id()); + } + retain + }); + naka_blocks.blocks.len() > 0 + }); + if nakamoto_block_data.len() == 0 { + continue; + } + + if let Some(newer_nakamoto_data) = newer.pushed_nakamoto_blocks.get_mut(&nk) { + newer_nakamoto_data.append(&mut nakamoto_block_data); + } else { + newer.pushed_nakamoto_blocks.insert(nk, nakamoto_block_data); + } + } + + // merge uploaded data, but deduplicate + self.uploaded_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate uploaded transaction {}", &tx.txid()); + } + retain + }); + self.uploaded_blocks.retain_mut(|ref mut blk_data| { + blk_data.blocks.retain(|blk| { + let block_id = StacksBlockId::new(&blk.0, &blk.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate uploaded block {}", &block_id); + } + retain + }); + + blk_data.blocks.len() > 0 + }); + self.uploaded_microblocks.retain_mut(|ref mut mblock_data| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate uploaded microblock {}", &mblk.block_hash()); + } + retain + }); + + mblock_data.microblocks.len() > 0 + }); + self.uploaded_nakamoto_blocks.retain(|nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate uploaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + + newer + .uploaded_transactions + .append(&mut self.uploaded_transactions); + newer.uploaded_blocks.append(&mut self.uploaded_blocks); + newer + .uploaded_microblocks + .append(&mut self.uploaded_microblocks); + newer + .uploaded_nakamoto_blocks + .append(&mut self.uploaded_nakamoto_blocks); + + // merge uploaded/pushed stackerdb, but drop stale versions + let newer_stackerdb_chunk_versions: HashMap<_, _> = newer + .uploaded_stackerdb_chunks + .iter() + .chain(newer.pushed_stackerdb_chunks.iter()) + .map(|chunk| { + ( + ( + chunk.contract_id.clone(), + chunk.rc_consensus_hash.clone(), + chunk.chunk_data.slot_id, + ), + chunk.chunk_data.slot_version, + ) + }) + .collect(); + + self.uploaded_stackerdb_chunks.retain(|push_chunk| { + if push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + self.pushed_stackerdb_chunks.retain(|push_chunk| { + if 
push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop pushed StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop pushed StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + newer + .uploaded_stackerdb_chunks + .append(&mut self.uploaded_stackerdb_chunks); + newer + .pushed_stackerdb_chunks + .append(&mut self.pushed_stackerdb_chunks); + + // dedup sync'ed transactions + self.synced_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate sync'ed transaction {}", &tx.txid()); + } + retain + }); + + newer + .synced_transactions + .append(&mut self.synced_transactions); + + // no dedup here, but do merge + newer + .stacker_db_sync_results + .append(&mut self.stacker_db_sync_results); + newer.attachments.append(&mut self.attachments); + + newer + } + pub fn has_blocks(&self) -> bool { self.blocks.len() > 0 || self.pushed_blocks.len() > 0 } @@ -1616,6 +2119,10 @@ impl NetworkResult { || self.has_stackerdb_chunks() } + pub fn has_block_data_to_store(&self) -> bool { + self.has_blocks() || self.has_microblocks() || self.has_nakamoto_blocks() + } + pub fn consume_unsolicited(&mut self, unhandled_messages: PendingMessages) { for ((_event_id, neighbor_key), messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { @@ -1734,6 +2241,7 @@ pub mod test { use clarity::boot_util::boot_code_id; use clarity::types::sqlite::NO_PARAMS; + use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; @@ -2486,7 +2994,17 @@ pub mod test { let smart_contract = TransactionPayload::SmartContract( TransactionSmartContract { name: ContractName::try_from( - conf.test_name.replace("::", "-").to_string(), + conf.test_name + .replace("::", "-") + .chars() + .skip( + conf.test_name + .len() + .saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::<String>() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), ) .expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) @@ -2633,10 +3151,13 @@ pub mod test { let stackerdb_contracts: Vec<_> = stacker_db_syncs.keys().map(|cid| cid.clone()).collect(); + let burnchain_db = config.burnchain.open_burnchain_db(false).unwrap(); + let mut peer_network = PeerNetwork::new( peerdb, atlasdb, p2p_stacker_dbs, + burnchain_db, local_peer, config.peer_version, config.burnchain.clone(), @@ -2914,8 +3435,6 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); - let old_tip = self.network.stacks_tip.clone(); - self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); @@ -2924,6 +3443,28 @@ pub mod test { self.stacks_node = Some(stacks_node); } + pub fn refresh_reward_cycles(&mut self) { + let sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = 
self.stacks_node.take().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_block_id = self.network.stacks_tip.block_id(); + let tip_height = self.network.stacks_tip.height; + + self.network + .refresh_reward_cycles( + &sortdb, + &mut stacks_node.chainstate, + &tip, + &tip_block_id, + tip_height, + ) + .unwrap(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + } + pub fn for_each_convo_p2p<F, R>(&mut self, mut f: F) -> Vec<Result<R, net_error>> where F: FnMut(usize, &mut ConversationP2P) -> Result<R, net_error>, diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 054fefaf1d..a20145a2b6 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -243,11 +243,18 @@ impl CurrentRewardSet { /// Cached stacks chain tip info, consumed by RPC endpoints #[derive(Clone, Debug, PartialEq)] pub struct StacksTipInfo { + /// consensus hash of the highest processed stacks block pub consensus_hash: ConsensusHash, + /// block hash of the highest processed stacks block pub block_hash: BlockHeaderHash, + /// height of the highest processed stacks block pub height: u64, + /// coinbase height of the highest processed tenure pub coinbase_height: u64, + /// whether or not the system has transitioned to Nakamoto pub is_nakamoto: bool, + /// highest burnchain block discovered + pub burnchain_height: u64, } impl StacksTipInfo { @@ -258,6 +265,7 @@ impl StacksTipInfo { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } @@ -306,6 +314,9 @@ pub struct PeerNetwork { pub peerdb: PeerDB, pub atlasdb: AtlasDB, + // handle to burnchain DB + pub burnchain_db: BurnchainDB, + // ongoing p2p conversations (either they reached out to us, or we to them) pub peers: PeerMap, pub sockets: HashMap<usize, mio_net::TcpStream>, @@ -444,6 +455,7 @@ impl PeerNetwork { peerdb: PeerDB, atlasdb: AtlasDB, stackerdbs: StackerDBs, + burnchain_db: BurnchainDB, mut local_peer: LocalPeer, peer_version: u32, burnchain: Burnchain, @@ -509,6 +521,8 @@ impl PeerNetwork { peerdb, atlasdb, + burnchain_db, + peers: PeerMap::new(), sockets: HashMap::new(), events: HashMap::new(), @@ -4257,6 +4271,7 @@ impl PeerNetwork { .anchored_header .as_stacks_nakamoto() .is_some(), + burnchain_height: self.stacks_tip.burnchain_height, }; debug!( "{:?}: Parent Stacks tip off of {} is {:?}", @@ -4280,18 +4295,88 @@ impl PeerNetwork { } } + /// Determine if we need to invalidate a given cached reward set. + /// + /// In Epoch 2, this requires checking the first sortition in the start of the reward set's + /// reward phase. + /// + /// In Nakamoto, this requires checking the anchor block in the prepare phase for the upcoming + /// reward phase. + fn check_reload_cached_reward_set( + &self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + rc: u64, + tip_sn: &BlockSnapshot, + tip_block_id: &StacksBlockId, + tip_height: u64, + ) -> Result<bool, net_error> { + let epoch = self.get_epoch_at_burn_height(tip_sn.block_height); + if epoch.epoch_id >= StacksEpochId::Epoch30 { + // epoch 3, where there are no forks except from bugs or burnchain reorgs. 
+ // invalidate reward cycles on burnchain or stacks reorg, should they ever happen + let reorg = Self::is_reorg(Some(&self.burnchain_tip), tip_sn, sortdb) + || Self::is_nakamoto_reorg( + &self.stacks_tip.block_id(), + self.stacks_tip.height, + tip_block_id, + tip_height, + chainstate, + ); + return Ok(reorg); + } else { + // epoch 2 + // NOTE: + 1 needed because the sortition db indexes anchor blocks at index height 1, + // not 0 + let ih = sortdb.index_handle(&tip_sn.sortition_id); + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc) + 1; + let Some(ancestor_sort_id) = + get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? + else { + // reward cycle is too far back for there to be an ancestor, so no need to + // reload + test_debug!( + "No ancestor sortition ID off of {} (height {}) at {rc_start_height}", + &tip_sn.sortition_id, + tip_sn.block_height + ); + return Ok(false); + }; + let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); + let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; + + if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { + if let Some(anchor_hash) = anchor_hash_opt.as_ref() { + // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start + // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to + // deal with in the pre-Nakamoto days + if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) + || cached_rc_info.anchor_block_hash == *anchor_hash + { + // cached reward set data is still valid + test_debug!("Cached reward cycle {rc} is still valid"); + return Ok(false); + } + } + } + } + + Ok(true) + } + /// Refresh our view of the last three reward cycles /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the /// signing set) for the current, previous, and previous-previous reward cycles. This data is /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from /// any of these reward cycles. #[cfg_attr(test, mutants::skip)] - fn refresh_reward_cycles( + pub fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, chainstate: &mut StacksChainState, tip_sn: &BlockSnapshot, tip_block_id: &StacksBlockId, + tip_height: u64, ) -> Result<(), net_error> { let cur_rc = self .burnchain @@ -4300,35 +4385,22 @@ let prev_rc = cur_rc.saturating_sub(1); let prev_prev_rc = prev_rc.saturating_sub(1); - let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { debug!("Refresh reward cycle info for cycle {}", rc); - let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); - let Some(ancestor_sort_id) = - get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? - else { - // reward cycle is too far back for there to be an ancestor + if self.current_reward_sets.contains_key(&rc) + && !self.check_reload_cached_reward_set( + sortdb, + chainstate, + rc, + tip_sn, + tip_block_id, + tip_height, + )? 
+ { continue; - }; - let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); - let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; - - if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { - if let Some(anchor_hash) = anchor_hash_opt.as_ref() { - // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start - // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to - // deal with in the pre-Nakamoto days - if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) - || cached_rc_info.anchor_block_hash == *anchor_hash - { - // cached reward set data is still valid - continue; - } - } } - - debug!("Load reward cycle info for cycle {}", rc); + debug!("Refresh reward cycle info for cycle {rc}"); let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, &tip_sn.sortition_id, @@ -4385,6 +4457,7 @@ impl PeerNetwork { let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; + let new_burnchain_tip = self.burnchain_db.get_canonical_chain_tip()?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height || self.num_state_machine_passes == 0 || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; @@ -4434,6 +4507,7 @@ impl PeerNetwork { chainstate, &canonical_sn, &new_stacks_tip_block_id, + stacks_tip_height, )?; } @@ -4463,6 +4537,7 @@ impl PeerNetwork { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } Err(e) => return Err(e), @@ -4534,12 +4609,10 @@ impl PeerNetwork { if self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update heaviest affirmation map view - let burnchain_db = self.burnchain.open_burnchain_db(false)?; - self.heaviest_affirmation_map = static_get_heaviest_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, ) @@ -4550,7 +4623,7 @@ impl PeerNetwork { self.tentative_best_affirmation_map = static_get_canonical_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, chainstate, &canonical_sn.sortition_id, @@ -4591,9 +4664,8 @@ impl PeerNetwork { if stacks_tip_changed && self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update stacks tip affirmation map view // (NOTE: this check has to happen _after_ self.chain_view gets updated!) 
- let burnchain_db = self.burnchain.open_burnchain_db(false)?; self.stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, &canonical_sn.canonical_stacks_tip_consensus_hash, @@ -4633,7 +4705,7 @@ impl PeerNetwork { debug!( "{:?}: handle unsolicited stacks messages: tenure changed {} != {}, {} buffered", self.get_local_peer(), - &self.burnchain_tip.consensus_hash, + &self.stacks_tip.consensus_hash, &canonical_sn.consensus_hash, self.pending_stacks_messages .iter() @@ -4659,8 +4731,10 @@ impl PeerNetwork { height: stacks_tip_height, coinbase_height, is_nakamoto: stacks_tip_is_nakamoto, + burnchain_height: new_burnchain_tip.block_height, }; self.parent_stacks_tip = parent_stacks_tip; + self.parent_stacks_tip.burnchain_height = new_burnchain_tip.block_height; debug!( "{:?}: canonical Stacks tip is now {:?}", @@ -4733,7 +4807,6 @@ impl PeerNetwork { ibd, true, ); - let unhandled_messages = self.handle_unsolicited_stacks_messages(chainstate, unhandled_messages, true); @@ -4980,7 +5053,7 @@ impl PeerNetwork { Ok(()) } - /// Static helper to check to see if there has been a reorg + /// Static helper to check to see if there has been a burnchain reorg pub fn is_reorg( last_sort_tip: Option<&BlockSnapshot>, sort_tip: &BlockSnapshot, @@ -5003,15 +5076,15 @@ impl PeerNetwork { { // current and previous sortition tips are at the same height, but represent different // blocks. - debug!( - "Reorg detected at burn height {}: {} != {}", + info!( + "Burnchain reorg detected at burn height {}: {} != {}", sort_tip.block_height, &last_sort_tip.consensus_hash, &sort_tip.consensus_hash ); return true; } // It will never be the case that the last and current tip have different heights, but the - // smae consensus hash. If they have the same height, then we would have already returned + // same consensus hash. If they have the same height, then we would have already returned // since we've handled both the == and != cases for their consensus hashes. So if we reach // this point, the heights and consensus hashes are not equal. We only need to check that // last_sort_tip is an ancestor of sort_tip @@ -5043,6 +5116,60 @@ impl PeerNetwork { false } + /// Static helper to check to see if there has been a Nakamoto reorg. + /// Return true if there's a Nakamoto reorg + /// Return false otherwise. + pub fn is_nakamoto_reorg( + last_stacks_tip: &StacksBlockId, + last_stacks_tip_height: u64, + stacks_tip: &StacksBlockId, + stacks_tip_height: u64, + chainstate: &StacksChainState, + ) -> bool { + if last_stacks_tip == stacks_tip { + // same tip + return false; + } + + if last_stacks_tip_height == stacks_tip_height && last_stacks_tip != stacks_tip { + // last block is a sibling + info!( + "Stacks reorg detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + if stacks_tip_height < last_stacks_tip_height { + info!( + "Stacks reorg (chain shrink) detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + // It will never be the case that the last and current tip have different heights, but the + // same block ID. If they have the same height, then we would have already returned + // since we've handled both the == and != cases for their block IDs. So if we reach + // this point, the heights and block IDs are not equal. 
We only need to check that + // last_stacks_tip is an ancestor of stacks_tip + + let mut cursor = stacks_tip.clone(); + for _ in last_stacks_tip_height..stacks_tip_height { + let Ok(Some(parent_id)) = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor) + else { + error!("Failed to load parent id of {cursor}"); + return true; + }; + cursor = parent_id; + } + + debug!("is_nakamoto_reorg check"; + "parent_id" => %cursor, + "last_stacks_tip" => %last_stacks_tip); + + cursor != *last_stacks_tip + } + /// Log our neighbors. /// Used for testing and debugging fn log_neighbors(&mut self) { @@ -5125,6 +5252,10 @@ impl PeerNetwork { } }; + test_debug!( + "unsolicited_buffered_messages = {:?}", + &unsolicited_buffered_messages + ); let mut network_result = NetworkResult::new( self.stacks_tip.block_id(), self.num_state_machine_passes, @@ -5132,6 +5263,8 @@ impl PeerNetwork { self.num_downloader_passes, self.peers.len(), self.chain_view.burn_block_height, + self.stacks_tip.coinbase_height, + self.stacks_tip.height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), ); @@ -5266,7 +5399,7 @@ mod test { network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), + working_dir: ":memory:".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, initial_reward_start_block: 50, @@ -5297,12 +5430,14 @@ let atlas_config = AtlasConfig::new(false); let atlasdb = AtlasDB::connect_memory(atlas_config).unwrap(); let stacker_db = StackerDBs::connect_memory(); + let burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); let local_peer = PeerDB::get_local_peer(db.conn()).unwrap(); let p2p = PeerNetwork::new( db, atlasdb, stacker_db, + burnchain_db, local_peer, 0x12345678, burnchain, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7e4ecbb408..b5fbf76cf4 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1703,6 +1703,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, + reject_blocks_pushed: bool, ) -> Result<(Vec<AcceptedNakamotoBlocks>, Vec<NeighborKey>), net_error> { let mut pushed_blocks = vec![]; let mut bad_neighbors = vec![]; @@ -1731,6 +1732,14 @@ for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) { let block_id = nakamoto_block.block_id(); + if reject_blocks_pushed { + debug!( + "Received pushed Nakamoto block {} from {}, but configured to reject it.", + block_id, neighbor_key + ); + continue; + } + debug!( "Received pushed Nakamoto block {} from {}", block_id, neighbor_key @@ -2092,6 +2101,7 @@ impl Relayer { /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that /// sent us invalid blocks. 
pub fn process_new_nakamoto_blocks( + connection_opts: &ConnectionOptions, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2128,6 +2138,7 @@ impl Relayer { sortdb, chainstate, coord_comms, + connection_opts.reject_blocks_pushed, ) { Ok(x) => x, Err(e) => { @@ -2311,8 +2322,6 @@ impl Relayer { event_observer, )?; - update_stacks_tip_height(chain_height as i64); - Ok(ret) } @@ -2848,6 +2857,7 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, ) -> u64 { let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( + &self.connection_opts, network_result, burnchain, sortdb, @@ -3022,6 +3032,10 @@ impl Relayer { event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; + update_stacks_tip_height( + i64::try_from(network_result.stacks_tip_height).unwrap_or(i64::MAX), + ); + let receipts = ProcessedNetReceipts { mempool_txs_added, processed_unconfirmed_state, diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 57d1a427dc..bbbec21290 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -155,7 +155,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6e61e7e610..d9c7402bf8 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -22,18 +22,24 @@ pub mod mempool; pub mod neighbors; pub mod relay; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::PrincipalData; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use libstackerdb::StackerDBChunkData; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::bitvec::BitVec; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{ - StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, + StacksPublicKey, TrieHash, }; +use stacks_common::types::net::PeerAddress; use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFProof; use crate::burnchains::PoxConstants; @@ -45,7 +51,7 @@ use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_pox_4_lockup_chain_id, make_signer_key_signature, @@ -54,8 +60,10 @@ use crate::chainstate::stacks::boot::test::{ use crate::chainstate::stacks::boot::{ 
MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; +use crate::chainstate::stacks::db::blocks::test::make_empty_coinbase_block; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::test::make_codec_test_microblock; use crate::chainstate::stacks::{ CoinbasePayload, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, @@ -66,6 +74,10 @@ use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; +use crate::net::{ + BlocksData, BlocksDatum, MicroblocksData, NakamotoBlocksData, NeighborKey, NetworkResult, + PingData, StackerDBPushChunkData, StacksMessage, StacksMessageType, +}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; @@ -525,7 +537,7 @@ impl NakamotoBootPlan { }) .collect(); - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -533,13 +545,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = other_peer.network.stacks_tip.clone(); + let mut old_tip = other_peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -548,6 +561,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } @@ -560,7 +574,7 @@ impl NakamotoBootPlan { .burnchain .is_in_prepare_phase(sortition_height.into()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -568,13 +582,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } other_peers .iter_mut() .zip(other_peer_nonces.iter_mut()) .for_each(|(peer, nonce)| { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&[], nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -583,6 +598,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = 
peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } }); @@ -595,7 +611,7 @@ impl NakamotoBootPlan { // advance to the start of epoch 3.0 while sortition_height < epoch_30_height - 1 { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&vec![], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -603,13 +619,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&vec![], other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -618,6 +635,8 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = + other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } @@ -1125,3 +1144,676 @@ fn test_boot_nakamoto_peer() { let observer = TestEventObserver::new(); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); } + +#[test] +fn test_network_result_update() { + let mut network_result_1 = NetworkResult::new( + StacksBlockId([0x11; 32]), + 1, + 1, + 1, + 1, + 1, + 1, + 1, + ConsensusHash([0x11; 20]), + HashMap::new(), + ); + + let mut network_result_2 = NetworkResult::new( + StacksBlockId([0x22; 32]), + 2, + 2, + 2, + 2, + 2, + 2, + 2, + ConsensusHash([0x22; 20]), + HashMap::new(), + ); + + let nk1 = NeighborKey { + peer_version: 1, + network_id: 1, + addrbytes: PeerAddress([0x11; 16]), + port: 1, + }; + + let nk2 = NeighborKey { + peer_version: 2, + network_id: 2, + addrbytes: PeerAddress([0x22; 16]), + port: 2, + }; + + let msg1 = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x11; 32]), + 1, + &BurnchainHeaderHash([0x11; 32]), + StacksMessageType::Ping(PingData { nonce: 1 }), + ); + + let mut msg2 = StacksMessage::new( + 2, + 2, + 2, + &BurnchainHeaderHash([0x22; 32]), + 2, + &BurnchainHeaderHash([0x22; 32]), + StacksMessageType::Ping(PingData { nonce: 2 }), + ); + msg2.sign(2, &StacksPrivateKey::new()).unwrap(); + + let pkey_1 = StacksPrivateKey::new(); + let pkey_2 = StacksPrivateKey::new(); + + let pushed_pkey_1 = StacksPrivateKey::new(); + let pushed_pkey_2 = StacksPrivateKey::new(); + + let uploaded_pkey_1 = StacksPrivateKey::new(); + let uploaded_pkey_2 = StacksPrivateKey::new(); + + let blk1 = make_empty_coinbase_block(&pkey_1); + let blk2 = make_empty_coinbase_block(&pkey_2); + + let pushed_blk1 = make_empty_coinbase_block(&pushed_pkey_1); + let pushed_blk2 = make_empty_coinbase_block(&pushed_pkey_2); + + let uploaded_blk1 = make_empty_coinbase_block(&uploaded_pkey_1); + let uploaded_blk2 = make_empty_coinbase_block(&uploaded_pkey_2); + + let mblk1 = make_codec_test_microblock(1); + let mblk2 = make_codec_test_microblock(2); + + let pushed_mblk1 = make_codec_test_microblock(3); + let pushed_mblk2 = make_codec_test_microblock(4); + + let uploaded_mblk1 = make_codec_test_microblock(5); + let 
uploaded_mblk2 = make_codec_test_microblock(6); + + let pushed_tx1 = make_codec_test_microblock(3).txs[2].clone(); + let pushed_tx2 = make_codec_test_microblock(4).txs[3].clone(); + + let uploaded_tx1 = make_codec_test_microblock(5).txs[4].clone(); + let uploaded_tx2 = make_codec_test_microblock(6).txs[5].clone(); + + let synced_tx1 = make_codec_test_microblock(7).txs[6].clone(); + let synced_tx2 = make_codec_test_microblock(8).txs[7].clone(); + + let naka_header_1 = NakamotoBlockHeader { + version: 1, + chain_length: 1, + burn_spent: 1, + consensus_hash: ConsensusHash([0x01; 20]), + parent_block_id: StacksBlockId([0x01; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x01; 32]), + state_index_root: TrieHash([0x01; 32]), + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_header_2 = NakamotoBlockHeader { + version: 2, + chain_length: 2, + burn_spent: 2, + consensus_hash: ConsensusHash([0x02; 20]), + parent_block_id: StacksBlockId([0x02; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x02; 32]), + state_index_root: TrieHash([0x02; 32]), + timestamp: 2, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_1 = NakamotoBlockHeader { + version: 3, + chain_length: 3, + burn_spent: 3, + consensus_hash: ConsensusHash([0x03; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x03; 32]), + state_index_root: TrieHash([0x03; 32]), + timestamp: 3, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_2 = NakamotoBlockHeader { + version: 4, + chain_length: 4, + burn_spent: 4, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x04; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x04; 32]), + state_index_root: TrieHash([0x04; 32]), + timestamp: 4, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_uploaded_header_1 = NakamotoBlockHeader { + version: 5, + chain_length: 5, + burn_spent: 5, + consensus_hash: ConsensusHash([0x05; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x05; 32]), + timestamp: 5, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_uploaded_header_2 = NakamotoBlockHeader { + version: 6, + chain_length: 6, + burn_spent: 6, + consensus_hash: ConsensusHash([0x06; 20]), + parent_block_id: StacksBlockId([0x06; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x06; 32]), + timestamp: 6, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nblk1 = NakamotoBlock { + header: naka_header_1.clone(), + txs: vec![], + }; + let nblk2 = NakamotoBlock { + header: naka_header_2.clone(), + txs: vec![], + }; + + let pushed_nblk1 = NakamotoBlock { + header: naka_pushed_header_1.clone(), + txs: vec![], + }; + let pushed_nblk2 = NakamotoBlock { + header: naka_pushed_header_2.clone(), + txs: vec![], + }; + + let uploaded_nblk1 = NakamotoBlock { + header: naka_uploaded_header_1.clone(), + txs: vec![], + }; + let uploaded_nblk2 = NakamotoBlock { + header: naka_uploaded_header_2.clone(), + txs: vec![], + }; + + 
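Each fixture header above differs in every field, so each block derives a distinct block ID; `update` dedups Nakamoto blocks by `block_id()` alone, which is derived from the header rather than the transaction list carried here. A small illustration of that identity rule (an editor's aside, not part of the original test):

// Sketch: two NakamotoBlocks built from the same header collapse to one
// identity, so a downloaded copy and a pushed copy of the same block dedup.
let dup_a = NakamotoBlock { header: naka_header_1.clone(), txs: vec![] };
let dup_b = NakamotoBlock { header: naka_header_1.clone(), txs: vec![] };
assert_eq!(dup_a.block_id(), dup_b.block_id()); // same header => same block id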
let pushed_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x11; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![1], + }, + }; + + let pushed_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x22; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![2], + }, + }; + + let uploaded_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + chunk_data: StackerDBChunkData { + slot_id: 3, + slot_version: 3, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let uploaded_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x44; 20]), + chunk_data: StackerDBChunkData { + slot_id: 4, + slot_version: 4, + sig: MessageSignature::empty(), + data: vec![4], + }, + }; + + network_result_1 + .unhandled_messages + .insert(nk1.clone(), vec![msg1.clone()]); + network_result_1 + .blocks + .push((ConsensusHash([0x11; 20]), blk1.clone(), 1)); + network_result_1.confirmed_microblocks.push(( + ConsensusHash([0x11; 20]), + vec![mblk1.clone()], + 1, + )); + network_result_1 + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + network_result_1 + .pushed_transactions + .insert(nk1.clone(), vec![(vec![], pushed_tx1.clone())]); + network_result_1.pushed_blocks.insert( + nk1.clone(), + vec![BlocksData { + blocks: vec![BlocksDatum(ConsensusHash([0x11; 20]), pushed_blk1.clone())], + }], + ); + network_result_1.pushed_microblocks.insert( + nk1.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![pushed_mblk1.clone()], + }, + )], + ); + network_result_1.pushed_nakamoto_blocks.insert( + nk1.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk1], + }, + )], + ); + network_result_1 + .uploaded_transactions + .push(uploaded_tx1.clone()); + network_result_1.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + uploaded_blk1.clone(), + )], + }); + network_result_1.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![uploaded_mblk1.clone()], + }); + network_result_1 + .uploaded_nakamoto_blocks + .push(uploaded_nblk1.clone()); + network_result_1 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_1.clone()); + network_result_1 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_1.clone()); + network_result_1.synced_transactions.push(synced_tx1); + + network_result_2 + .unhandled_messages + .insert(nk2.clone(), vec![msg2.clone()]); + network_result_2 + .blocks + .push((ConsensusHash([0x22; 20]), blk2.clone(), 2)); + network_result_2.confirmed_microblocks.push(( + ConsensusHash([0x22; 20]), + vec![mblk2.clone()], + 2, + )); + network_result_2 + .nakamoto_blocks + .insert(nblk2.block_id(), nblk2.clone()); + network_result_2 + .pushed_transactions + .insert(nk2.clone(), vec![(vec![], pushed_tx2.clone())]); + network_result_2.pushed_blocks.insert( + nk2.clone(), + vec![BlocksData { + blocks: vec![BlocksDatum(ConsensusHash([0x22; 20]), pushed_blk2.clone())], + }], + ); + network_result_2.pushed_microblocks.insert( + 
nk2.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![pushed_mblk2.clone()], + }, + )], + ); + network_result_2.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk2], + }, + )], + ); + network_result_2 + .uploaded_transactions + .push(uploaded_tx2.clone()); + network_result_2.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x22; 20]), + uploaded_blk2.clone(), + )], + }); + network_result_2.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![uploaded_mblk2.clone()], + }); + network_result_2 + .uploaded_nakamoto_blocks + .push(uploaded_nblk2.clone()); + network_result_2 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_2.clone()); + network_result_2 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_2.clone()); + network_result_2.synced_transactions.push(synced_tx2); + + let mut network_result_union = network_result_2.clone(); + let mut n1 = network_result_1.clone(); + network_result_union + .unhandled_messages + .extend(n1.unhandled_messages.into_iter()); + network_result_union.blocks.append(&mut n1.blocks); + network_result_union + .confirmed_microblocks + .append(&mut n1.confirmed_microblocks); + network_result_union + .nakamoto_blocks + .extend(n1.nakamoto_blocks.into_iter()); + network_result_union + .pushed_transactions + .extend(n1.pushed_transactions.into_iter()); + network_result_union + .pushed_blocks + .extend(n1.pushed_blocks.into_iter()); + network_result_union + .pushed_microblocks + .extend(n1.pushed_microblocks.into_iter()); + network_result_union + .pushed_nakamoto_blocks + .extend(n1.pushed_nakamoto_blocks.into_iter()); + network_result_union + .uploaded_transactions + .append(&mut n1.uploaded_transactions); + network_result_union + .uploaded_blocks + .append(&mut n1.uploaded_blocks); + network_result_union + .uploaded_microblocks + .append(&mut n1.uploaded_microblocks); + network_result_union + .uploaded_nakamoto_blocks + .append(&mut n1.uploaded_nakamoto_blocks); + // stackerdb chunks from n1 get dropped since their rc_consensus_hash no longer matches + network_result_union + .synced_transactions + .append(&mut n1.synced_transactions); + + // update is idempotent + let old = network_result_1.clone(); + let new = network_result_1.clone(); + assert_eq!(old.update(new), network_result_1); + + // disjoint results get unioned, except for stackerdb chunks + let old = network_result_1.clone(); + let new = network_result_2.clone(); + assert_eq!(old.update(new), network_result_union); + + // merging a subset is idempotent + assert_eq!( + network_result_1 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + assert_eq!( + network_result_2 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + + // stackerdb uploaded chunks get consolidated correctly + let mut old = NetworkResult::new( + StacksBlockId([0xaa; 32]), + 10, + 10, + 10, + 10, + 10, + 10, + 10, + ConsensusHash([0xaa; 20]), + HashMap::new(), + ); + let mut new = old.clone(); + + let old_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_1 = StackerDBPushChunkData { + contract_id: 
QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + old.uploaded_stackerdb_chunks.push(old_chunk_1.clone()); + // replaced + new.uploaded_stackerdb_chunks.push(new_chunk_1.clone()); + // included + new.uploaded_stackerdb_chunks.push(new_chunk_2.clone()); + + assert_eq!( + old.update(new).uploaded_stackerdb_chunks, + vec![new_chunk_1.clone(), new_chunk_2.clone()] + ); + + // stackerdb pushed chunks get consolidated correctly + let mut old = NetworkResult::new( + StacksBlockId([0xaa; 32]), + 10, + 10, + 10, + 10, + 10, + 10, + 10, + ConsensusHash([0xaa; 20]), + HashMap::new(), + ); + let mut new = old.clone(); + + let old_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + old.pushed_stackerdb_chunks.push(old_chunk_1.clone()); + // replaced + new.pushed_stackerdb_chunks.push(new_chunk_1.clone()); + // included + new.pushed_stackerdb_chunks.push(new_chunk_2.clone()); + + assert_eq!( + old.update(new).pushed_stackerdb_chunks, + vec![new_chunk_1.clone(), new_chunk_2.clone()] + ); + + // nakamoto blocks obtained via download, upload, or push get consolidated + let mut old = NetworkResult::new( + StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + old.nakamoto_blocks.insert(nblk1.block_id(), nblk1.clone()); + old.pushed_nakamoto_blocks.insert( + nk1.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + old.uploaded_nakamoto_blocks.push(nblk1.clone()); + + let new = NetworkResult::new( + StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + + let mut new_pushed = new.clone(); + let mut new_uploaded = new.clone(); + let mut new_downloaded = new.clone(); + + new_downloaded + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + new_pushed.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + new_uploaded.uploaded_nakamoto_blocks.push(nblk1.clone()); + + debug!("===="); + let updated_downloaded = old.clone().update(new_downloaded); + assert_eq!(updated_downloaded.nakamoto_blocks.len(), 1); + assert_eq!( + updated_downloaded + .nakamoto_blocks + .get(&nblk1.block_id()) + .unwrap(), + &nblk1 + ); + 
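The chunk assertions above pin down the consolidation rule that `NetworkResult::update` is expected to apply to StackerDB data: within a slot, an equal-or-newer `slot_version` from the incoming result replaces the older chunk, and chunks for unseen slots pass through untouched. A minimal sketch of that rule, using a simplified stand-in type rather than the real `StackerDBChunkData`:

```rust
/// Simplified stand-in for `StackerDBChunkData`; only slot id and version matter here.
#[derive(Clone, Debug, PartialEq)]
struct Chunk {
    slot_id: u32,
    slot_version: u32,
}

/// Assumed consolidation rule: a chunk from the older result survives only if
/// the newer result has no chunk for the same slot at an equal-or-newer version.
fn consolidate(mut old: Vec<Chunk>, new: Vec<Chunk>) -> Vec<Chunk> {
    old.retain(|o| {
        !new.iter()
            .any(|n| n.slot_id == o.slot_id && n.slot_version >= o.slot_version)
    });
    old.extend(new);
    old
}

fn main() {
    let old = vec![Chunk { slot_id: 1, slot_version: 1 }];
    let new = vec![
        Chunk { slot_id: 1, slot_version: 2 }, // replaces the old slot-1 chunk
        Chunk { slot_id: 2, slot_version: 2 }, // unseen slot, simply included
    ];
    assert_eq!(
        consolidate(old, new),
        vec![
            Chunk { slot_id: 1, slot_version: 2 },
            Chunk { slot_id: 2, slot_version: 2 },
        ]
    );
}
```

The test asserts this same replace-or-include behavior for both the `uploaded_stackerdb_chunks` and `pushed_stackerdb_chunks` paths.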
assert_eq!(updated_downloaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_downloaded.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_pushed = old.clone().update(new_pushed); + assert_eq!(updated_pushed.nakamoto_blocks.len(), 0); + assert_eq!(updated_pushed.pushed_nakamoto_blocks.len(), 1); + assert_eq!( + updated_pushed + .pushed_nakamoto_blocks + .get(&nk2) + .unwrap() + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks[0], + nblk1 + ); + assert_eq!(updated_pushed.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_uploaded = old.clone().update(new_uploaded); + assert_eq!(updated_uploaded.nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks.len(), 1); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks[0], nblk1); +} diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 23d1dd60a8..4f18e109a5 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -3097,6 +3097,8 @@ fn process_new_blocks_rejects_problematic_asts() { 0, 0, 0, + 0, + 0, ConsensusHash([0x01; 20]), HashMap::new(), ); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index d10a6ee368..231e0a91af 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -1221,13 +1221,14 @@ impl PeerNetwork { ) { // unable to store this due to quota being exceeded + debug!("{:?}: drop message due to quota being exceeded: {:?}", self.get_local_peer(), &message.payload.get_message_description()); return false; } if !buffer { debug!( "{:?}: Re-try handling buffered sortition-bound message {} from {:?}", - &self.get_local_peer(), + self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 958820b491..0c68d22ee7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } +thiserror = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 82282926d3..582b46a2fd 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -135,16 +135,16 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { if let BitcoinAddress::Segwit(segwit_addr) = btc_addr { // regtest segwit addresses use a different hrp let s = segwit_addr.to_bech32_hrp("bcrt"); - warn!("Re-encoding {} to {}", &segwit_addr, &s); + warn!("Re-encoding {segwit_addr} to {s}"); s } else { - format!("{}", &btc_addr) + format!("{btc_addr}") } } #[cfg(not(test))] pub fn addr2str(btc_addr: &BitcoinAddress) -> String { - format!("{}", &btc_addr) + format!("{btc_addr}") } // TODO: add tests from mutation testing results #4862 @@ -186,12 +186,11 @@ pub
fn make_bitcoin_indexer( let (_, network_type) = config.burnchain.get_bitcoin_network(); let indexer_runtime = BitcoinIndexerRuntime::new(network_type); - let burnchain_indexer = BitcoinIndexer { + BitcoinIndexer { config: indexer_config, runtime: indexer_runtime, - should_keep_running: should_keep_running, - }; - burnchain_indexer + should_keep_running, + } } pub fn get_satoshis_per_byte(config: &Config) -> u64 { @@ -215,7 +214,7 @@ impl LeaderBlockCommitFees { let mut fees = LeaderBlockCommitFees::estimated_fees_from_payload(payload, config); fees.spent_in_attempts = cmp::max(1, self.spent_in_attempts); fees.final_size = self.final_size; - fees.fee_rate = self.fee_rate + get_rbf_fee_increment(&config); + fees.fee_rate = self.fee_rate + get_rbf_fee_increment(config); fees.is_rbf_enabled = true; fees } @@ -306,8 +305,7 @@ impl BitcoinRegtestController { burnchain: Option, should_keep_running: Option>, ) -> Self { - std::fs::create_dir_all(&config.get_burnchain_path_str()) - .expect("Unable to create workdir"); + std::fs::create_dir_all(config.get_burnchain_path_str()).expect("Unable to create workdir"); let (_, network_id) = config.burnchain.get_bitcoin_network(); let res = SpvClient::new( @@ -319,15 +317,15 @@ impl BitcoinRegtestController { false, ); if let Err(err) = res { - error!("Unable to init block headers: {}", err); + error!("Unable to init block headers: {err}"); panic!() } let burnchain_params = burnchain_params_from_config(&config.burnchain); if network_id == BitcoinNetworkType::Mainnet && config.burnchain.epochs.is_some() { - panic!("It is an error to set custom epochs while running on Mainnet: network_id {:?} config.burnchain {:#?}", - &network_id, &config.burnchain); + panic!("It is an error to set custom epochs while running on Mainnet: network_id {network_id:?} config.burnchain {:#?}", + &config.burnchain); } let indexer_config = { @@ -434,11 +432,10 @@ impl BitcoinRegtestController { /// Get the default Burnchain instance from our config fn default_burnchain(&self) -> Burnchain { - let burnchain = match &self.burnchain_config { + match &self.burnchain_config { Some(burnchain) => burnchain.clone(), None => self.config.get_burnchain(), - }; - burnchain + } } /// Get the PoX constants in use @@ -465,7 +462,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::TrySyncAgain => { // try again immediately @@ -491,7 +488,7 @@ impl BitcoinRegtestController { (None, Some(chain_tip)) => chain_tip.clone(), (Some(state_transition), _) => { let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::from(state_transition), received_at: Instant::now(), }; @@ -501,7 +498,7 @@ impl BitcoinRegtestController { (None, None) => { // can happen at genesis let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::noop(), received_at: Instant::now(), }; @@ -576,7 +573,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::CoordinatorClosed => { return Err(BurnchainControllerError::CoordinatorClosed) @@ -602,8 +599,8 @@ impl BitcoinRegtestController { }; let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, - state_transition: state_transition, + block_snapshot, + 
state_transition, received_at: Instant::now(), }; @@ -641,11 +638,11 @@ impl BitcoinRegtestController { let filter_addresses = vec![addr2str(&address)]; let pubk = if self.config.miner.segwit { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; test_debug!("Import public key '{}'", &pubk.to_hex()); @@ -685,7 +682,7 @@ impl BitcoinRegtestController { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -753,11 +750,11 @@ impl BitcoinRegtestController { } let pubk = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; // Configure UTXO filter @@ -786,7 +783,7 @@ impl BitcoinRegtestController { break utxos; } Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } @@ -817,13 +814,13 @@ impl BitcoinRegtestController { utxos = match result { Ok(utxos) => utxos, Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } }; - test_debug!("Unspent for {:?}: {:?}", &filter_addresses, &utxos); + test_debug!("Unspent for {filter_addresses:?}: {utxos:?}"); if utxos.is_empty() { return None; @@ -832,20 +829,14 @@ impl BitcoinRegtestController { } } } else { - debug!( - "Got {} UTXOs for {:?}", - utxos.utxos.len(), - &filter_addresses - ); + debug!("Got {} UTXOs for {filter_addresses:?}", utxos.utxos.len(),); utxos }; let total_unspent = utxos.total_available(); if total_unspent < total_required { warn!( - "Total unspent {} < {} for {:?}", - total_unspent, - total_required, + "Total unspent {total_unspent} < {total_required} for {:?}", &pubk.to_hex() ); return None; @@ -1013,7 +1004,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1026,10 +1017,8 @@ impl BitcoinRegtestController { }; tx.output = vec![consensus_output]; - tx.output.push( - PoxAddress::Standard(payload.recipient.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), - ); + tx.output + .push(PoxAddress::Standard(payload.recipient, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT)); self.finalize_tx( epoch_id, @@ -1099,7 +1088,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1113,8 +1102,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output.push( - PoxAddress::Standard(payload.delegate_to.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), + PoxAddress::Standard(payload.delegate_to, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT), ); self.finalize_tx( @@ -1180,7 +1168,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + 
.map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1271,7 +1259,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output - .push(PoxAddress::Standard(payload.output.clone(), None).to_bitcoin_tx_out(output_amt)); + .push(PoxAddress::Standard(payload.output, None).to_bitcoin_tx_out(output_amt)); self.finalize_tx( epoch_id, @@ -1347,7 +1335,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1388,10 +1376,9 @@ impl BitcoinRegtestController { fn magic_bytes(&self) -> Vec { #[cfg(test)] { - if let Some(set_bytes) = TEST_MAGIC_BYTES + if let Some(set_bytes) = *TEST_MAGIC_BYTES .lock() .expect("FATAL: test magic bytes mutex poisoned") - .clone() { return set_bytes.to_vec(); } @@ -1399,6 +1386,7 @@ impl BitcoinRegtestController { self.config.burnchain.magic_bytes.as_bytes().to_vec() } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation( &mut self, epoch_id: StacksEpochId, @@ -1407,7 +1395,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, previous_fees: Option, - previous_txids: &Vec, + previous_txids: &[Txid], ) -> Result { let _ = self.sortdb_mut(); let burn_chain_tip = self @@ -1433,6 +1421,7 @@ impl BitcoinRegtestController { ) } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation_at_burnchain_height( &mut self, epoch_id: StacksEpochId, @@ -1441,7 +1430,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, mut estimated_fees: LeaderBlockCommitFees, - previous_txids: &Vec, + previous_txids: &[Txid], burnchain_block_height: u64, ) -> Result { let public_key = signer.get_public_key(); @@ -1500,10 +1489,10 @@ impl BitcoinRegtestController { let mut txid = tx.txid().as_bytes().to_vec(); txid.reverse(); - debug!("Transaction relying on UTXOs: {:?}", utxos); + debug!("Transaction relying on UTXOs: {utxos:?}"); let txid = Txid::from_bytes(&txid[..]).unwrap(); - let mut txids = previous_txids.clone(); - txids.push(txid.clone()); + let mut txids = previous_txids.to_vec(); + txids.push(txid); let ongoing_block_commit = OngoingBlockCommit { payload, utxos, @@ -1512,12 +1501,11 @@ impl BitcoinRegtestController { }; info!( - "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {})", + "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {fee_rate})", txid.to_hex(), ongoing_block_commit.fees.is_rbf_enabled, ongoing_block_commit.fees.total_spent(), - ongoing_block_commit.fees.final_size, - fee_rate, + ongoing_block_commit.fees.final_size ); self.ongoing_block_commit = Some(ongoing_block_commit); @@ -1537,15 +1525,8 @@ impl BitcoinRegtestController { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. 
- let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1563,10 +1544,7 @@ impl BitcoinRegtestController { Ok(true) ); if ongoing_tx_confirmed { - debug!( - "Was able to retrieve confirmation of ongoing burnchain TXID - {}", - txid - ); + debug!("Was able to retrieve confirmation of ongoing burnchain TXID - {txid}"); let res = self.send_block_commit_operation( epoch_id, payload, @@ -1574,11 +1552,11 @@ impl BitcoinRegtestController { None, None, None, - &vec![], + &[], ); return res; } else { - debug!("Was unable to retrieve ongoing TXID - {}", txid); + debug!("Was unable to retrieve ongoing TXID - {txid}"); }; } @@ -1589,13 +1567,13 @@ impl BitcoinRegtestController { .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { - if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { + if burn_chain_tip.block_hash == ongoing_op.utxos.bhh { found_last_mined_at = true; break; } let parent = BurnchainDB::get_burnchain_block( - &burnchain_db.conn(), + burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) .map_err(|_| BurnchainControllerError::BurnchainError)?; @@ -1609,15 +1587,8 @@ impl BitcoinRegtestController { "Possible presence of fork or stale UTXO cache, invalidating cached set of UTXOs."; "cached_burn_block_hash" => %ongoing_op.utxos.bhh, ); - let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1659,7 +1630,7 @@ impl BitcoinRegtestController { None, Some(ongoing_op.utxos.clone()), None, - &vec![], + &[], ) } else { // Case 2) ii): Attempt to RBF @@ -1724,9 +1695,9 @@ impl BitcoinRegtestController { } else { // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); - let utxos = match self.get_utxos( + match self.get_utxos( epoch_id, - &public_key, + public_key, total_required, utxos_to_exclude, block_height, @@ -1734,15 +1705,13 @@ impl BitcoinRegtestController { Some(utxos) => utxos, None => { warn!( - "No UTXOs for {} ({}) in epoch {}", + "No UTXOs for {} ({}) in epoch {epoch_id}", &public_key.to_hex(), - &addr2str(&addr), - epoch_id + &addr2str(&addr) ); return Err(BurnchainControllerError::NoUTXOs); } - }; - utxos + } }; // Prepare a backbone for the tx @@ -1756,6 +1725,7 @@ impl BitcoinRegtestController { Ok((transaction, utxos)) } + #[allow(clippy::too_many_arguments)] fn finalize_tx( &mut self, epoch_id: StacksEpochId, @@ -1854,18 +1824,14 @@ impl BitcoinRegtestController { } if total_consumed < total_target { - warn!( - "Consumed total {} is less than intended spend: {}", - total_consumed, total_target - ); + warn!("Consumed total {total_consumed} is less than intended spend: {total_target}"); return false; } // Append the change output let value = total_consumed - tx_cost; debug!( - "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_target + "Payments value: {value:?}, total_consumed: {total_consumed:?}, total_spent: {total_target:?}" ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { @@ -1884,7 +1850,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. 
Not adding change address."); } - for (_i, utxo) in utxos_set.utxos.iter().enumerate() { + for utxo in utxos_set.utxos.iter() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -1958,8 +1924,8 @@ impl BitcoinRegtestController { transaction.txid() }) .map_err(|e| { - error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) + error!("Bitcoin RPC error: transaction submission failed - {e:?}"); + BurnchainControllerError::TransactionSubmissionFailed(format!("{e:?}")) }) } @@ -1977,8 +1943,8 @@ impl BitcoinRegtestController { if debug_ctr % 10 == 0 { debug!( - "Waiting until canonical sortition height reaches {} (currently {})", - height_to_wait, canonical_sortition_tip.block_height + "Waiting until canonical sortition height reaches {height_to_wait} (currently {})", + canonical_sortition_tip.block_height ); } debug_ctr += 1; @@ -2012,7 +1978,7 @@ impl BitcoinRegtestController { /// Instruct a regtest Bitcoin node to build the next block. pub fn build_next_block(&self, num_blocks: u64) { - debug!("Generate {} block(s)", num_blocks); + debug!("Generate {num_blocks} block(s)"); let public_key_bytes = match &self.config.burnchain.local_mining_public_key { Some(public_key) => hex_bytes(public_key).expect("Invalid byte sequence"), None => panic!("Unable to make new block, mining public key"), @@ -2028,7 +1994,7 @@ impl BitcoinRegtestController { match result { Ok(_) => {} Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2036,7 +2002,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { - info!("Invalidating block {}", &block); + info!("Invalidating block {block}"); let request = BitcoinRPCRequest { method: "invalidateblock".into(), params: vec![json!(&block.to_string())], @@ -2044,7 +2010,7 @@ impl BitcoinRegtestController { jsonrpc: "2.0".into(), }; if let Err(e) = BitcoinRPCRequest::send(&self.config, request) { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2062,7 +2028,7 @@ impl BitcoinRegtestController { BurnchainHeaderHash::from_hex(v.get("result").unwrap().as_str().unwrap()).unwrap() } Err(e) => { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2118,7 +2084,7 @@ impl BitcoinRegtestController { } }; - transaction.map(|tx| SerializedTx::new(tx)) + transaction.map(SerializedTx::new) } #[cfg(test)] @@ -2139,7 +2105,7 @@ impl BitcoinRegtestController { for pk in pks { debug!("Import public key '{}'", &pk.to_hex()); - if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, pk) { warn!("Error when importing pubkey: {e:?}"); } } @@ -2157,7 +2123,7 @@ impl BitcoinRegtestController { num_blocks.try_into().unwrap(), addr2str(&address), ) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } return; @@ -2165,7 +2131,7 @@ impl BitcoinRegtestController { // otherwise, round robin generate blocks for i in 0..num_blocks { - let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let pk = &pks[i % pks.len()]; let address = 
self.get_miner_address(StacksEpochId::Epoch21, pk); if i < pks.len() { debug!( @@ -2177,7 +2143,7 @@ impl BitcoinRegtestController { if let Err(e) = BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2249,10 +2215,7 @@ impl BurnchainController for BitcoinRegtestController { target_block_height_opt: Option, ) -> Result<(BurnchainTip, u64), BurnchainControllerError> { // if no target block height is given, just fetch the first burnchain block. - self.receive_blocks( - false, - target_block_height_opt.map_or_else(|| Some(1), |x| Some(x)), - ) + self.receive_blocks(false, target_block_height_opt.map_or_else(|| Some(1), Some)) } fn sync( @@ -2271,10 +2234,7 @@ impl BurnchainController for BitcoinRegtestController { // Evaluate process_exit_at_block_height setting if let Some(cap) = self.config.burnchain.process_exit_at_block_height { if burnchain_tip.block_snapshot.block_height >= cap { - info!( - "Node succesfully reached the end of the ongoing {} blocks epoch!", - cap - ); + info!("Node succesfully reached the end of the ongoing {cap} blocks epoch!"); info!("This process will automatically terminate in 30s, restart your node for participating in the next epoch."); sleep_ms(30000); std::process::exit(0); @@ -2351,13 +2311,12 @@ impl SerializedTx { } pub fn txid(&self) -> Txid { - self.txid.clone() + self.txid } pub fn to_hex(&self) -> String { - let formatted_bytes: Vec = - self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); - format!("{}", formatted_bytes.join("")) + let formatted_bytes: Vec = self.bytes.iter().map(|b| format!("{b:02x}")).collect(); + formatted_bytes.join("").to_string() } } @@ -2389,7 +2348,7 @@ impl ParsedUTXO { Some(Sha256dHash::from(&txid[..])) } Err(err) => { - warn!("Unable to get txid from UTXO {}", err); + warn!("Unable to get txid from UTXO {err}"); None } } @@ -2418,8 +2377,8 @@ impl ParsedUTXO { Some(amount) } (lhs, rhs) => { - warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); - return None; + warn!("Error while converting BTC to sat {lhs:?} - {rhs:?}"); + None } } } @@ -2431,7 +2390,7 @@ impl ParsedUTXO { let base: u64 = 10; let int_part = amount / base.pow(8); let frac_part = amount % base.pow(8); - let amount = format!("{}.{:08}", int_part, frac_part); + let amount = format!("{int_part}.{frac_part:08}"); amount } @@ -2469,13 +2428,13 @@ type RPCResult = Result; impl From for RPCError { fn from(ioe: io::Error) -> Self { - Self::Network(format!("IO Error: {:?}", &ioe)) + Self::Network(format!("IO Error: {ioe:?}")) } } impl From for RPCError { fn from(ne: NetError) -> Self { - Self::Network(format!("Net Error: {:?}", &ne)) + Self::Network(format!("Net Error: {ne:?}")) } } @@ -2488,11 +2447,11 @@ impl BitcoinRPCRequest { _ => None, }; let url = config.burnchain.get_rpc_url(wallet_id); - Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {} as a URL", url)) + Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {url} as a URL")) }; debug!( - "BitcoinRPC builder '{}': {:?}:{:?}@{}", - &payload.method, &config.burnchain.username, &config.burnchain.password, &url + "BitcoinRPC builder '{}': {:?}:{:?}@{url}", + &payload.method, &config.burnchain.username, &config.burnchain.password ); let host = url @@ -2516,27 +2475,26 @@ impl BitcoinRPCRequest { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); 
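A note on the formatting changes that run through this file: Rust 2021 inline format arguments can only capture bare identifiers, not field accesses or method calls, which is why `{url}`, `{err}`, and `{txid}` get inlined while `&payload.method` and the `config.burnchain.*` fields stay positional, as in the `debug!` just above. A standalone illustration (hypothetical values, not this crate's types):

```rust
struct Payload {
    method: String,
}

fn main() {
    let url = "http://127.0.0.1:8332";
    let payload = Payload { method: "getblockcount".to_string() };

    // Plain identifiers can be captured directly in the format string.
    println!("connecting to {url}");

    // Field accesses and method calls cannot be captured inline; they must
    // remain positional arguments, as `&payload.method` does above.
    println!("BitcoinRPC builder '{}': {url}", &payload.method);

    // Binding to a local first makes capture possible.
    let method = &payload.method;
    println!("BitcoinRPC builder '{method}': {url}");
}
```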
request.add_header("Connection".into(), "close".into()); - match (&config.burnchain.username, &config.burnchain.password) { - (Some(username), Some(password)) => { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - request.add_header("Authorization".into(), auth_token); - } - (_, _) => {} - }; + if let (Some(username), Some(password)) = + (&config.burnchain.username, &config.burnchain.password) + { + let auth_token = format!("Basic {}", encode(format!("{username}:{password}"))); + request.add_header("Authorization".into(), auth_token); + } request } #[cfg(test)] pub fn get_raw_transaction(config: &Config, txid: &Txid) -> RPCResult { - debug!("Get raw transaction {}", txid); + debug!("Get raw transaction {txid}"); let payload = BitcoinRPCRequest { method: "getrawtransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; - debug!("Got raw transaction {}: {:?}", txid, &res); + let res = BitcoinRPCRequest::send(config, payload)?; + debug!("Got raw transaction {txid}: {res:?}"); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2544,11 +2502,11 @@ impl BitcoinRPCRequest { pub fn check_transaction_confirmed(config: &Config, txid: &Txid) -> RPCResult { let payload = BitcoinRPCRequest { method: "gettransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; let confirmations = res .get("result") .ok_or_else(|| RPCError::Parsing("No 'result' field in bitcoind RPC response".into()))? 
@@ -2567,7 +2525,7 @@ impl BitcoinRPCRequest { } pub fn generate_to_address(config: &Config, num_blocks: u64, address: String) -> RPCResult<()> { - debug!("Generate {} blocks to {}", num_blocks, &address); + debug!("Generate {num_blocks} blocks to {address}"); let payload = BitcoinRPCRequest { method: "generatetoaddress".to_string(), params: vec![num_blocks.into(), address.clone().into()], @@ -2575,11 +2533,8 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; - debug!( - "Generated {} blocks to {}: {:?}", - num_blocks, &address, &res - ); + let res = BitcoinRPCRequest::send(config, payload)?; + debug!("Generated {num_blocks} blocks to {address}: {res:?}"); Ok(()) } @@ -2598,21 +2553,17 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; - let bhh = match res.as_object_mut() { - Some(res) => { - let res = res - .get("result") - .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh: String = serde_json::from_value(res.to_owned()) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh = BurnchainHeaderHash::from_hex(&bhh) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - bhh - } - _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), + let mut res = BitcoinRPCRequest::send(config, payload)?; + let Some(res) = res.as_object_mut() else { + return Err(RPCError::Parsing("Failed to get UTXOs".to_string())); }; - + let res = res + .get("result") + .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh_string: String = serde_json::from_value(res.to_owned()) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh = BurnchainHeaderHash::from_hex(&bhh_string) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); @@ -2630,7 +2581,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let txids_to_filter = if let Some(utxos_to_exclude) = utxos_to_exclude { utxos_to_exclude .utxos @@ -2650,7 +2601,7 @@ impl BitcoinRPCRequest { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -2710,11 +2661,11 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let json_resp = BitcoinRPCRequest::send(&config, payload)?; + let json_resp = BitcoinRPCRequest::send(config, payload)?; if let Some(e) = json_resp.get("error") { if !e.is_null() { - error!("Error submitting transaction: {}", json_resp); + error!("Error submitting transaction: {json_resp}"); return Err(RPCError::Bitcoind(json_resp.to_string())); } } @@ -2756,9 +2707,9 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let result = BitcoinRPCRequest::send(&config, payload)?; + let result = BitcoinRPCRequest::send(config, payload)?; let checksum = result - .get(&"result".to_string()) + .get("result") .and_then(|res| res.as_object()) .and_then(|obj| obj.get("checksum")) .and_then(|checksum_val| checksum_val.as_str()) @@ -2770,13 +2721,13 @@ impl BitcoinRPCRequest { let payload = BitcoinRPCRequest { method: "importdescriptors".to_string(), 
params: vec![ - json!([{ "desc": format!("addr({})#{}", &addr2str(&address), &checksum), "timestamp": 0, "internal": true }]), + json!([{ "desc": format!("addr({})#{checksum}", &addr2str(&address)), "timestamp": 0, "internal": true }]), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; } Ok(()) } @@ -2790,7 +2741,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let mut wallets = Vec::new(); match res.as_object_mut() { Some(ref mut object) => match object.get_mut("result") { @@ -2799,7 +2750,7 @@ impl BitcoinRPCRequest { let parsed_wallet_name: String = match serde_json::from_value(entry) { Ok(wallet_name) => wallet_name, Err(err) => { - warn!("Failed parsing wallet name: {}", err); + warn!("Failed parsing wallet name: {err}"); continue; } }; @@ -2828,12 +2779,12 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; Ok(()) } pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let request = BitcoinRPCRequest::build_rpc_request(config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); let host = request.preamble().host.hostname(); @@ -2841,9 +2792,9 @@ impl BitcoinRPCRequest { let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { - return Ok(js); + Ok(js) } else { - return Err(RPCError::Parsing("Did not get a JSON response".into())); + Err(RPCError::Parsing("Did not get a JSON response".into())) } } } @@ -3025,12 +2976,12 @@ mod tests { Some(utxo_set), None, leader_fees, - &vec![], + &[], 2212, ) .unwrap(); - debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("send_block_commit_operation:\n{block_commit:#?}"); debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); assert_eq!(block_commit.output[3].value, 323507); diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index d518f5bdea..a626cfb443 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -44,8 +44,8 @@ impl MocknetController { let burnchain = config.get_burnchain(); Self { - config: config, - burnchain: burnchain, + config, + burnchain, db: None, queued_operations: VecDeque::new(), chain_tip: None, @@ -54,7 +54,7 @@ impl MocknetController { fn build_next_block_header(current_block: &BlockSnapshot) -> BurnchainBlockHeader { let curr_hash = ¤t_block.burn_header_hash.to_bytes()[..]; - let next_hash = Sha256Sum::from_data(&curr_hash); + let next_hash = Sha256Sum::from_data(curr_hash); let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( current_block.block_height + 1, diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 0c9446304d..0509993dd0 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -1,7 +1,6 @@ pub mod bitcoin_regtest_controller; pub mod mocknet_controller; -use std::fmt; use std::time::Instant; use stacks::burnchains; @@ -16,41 +15,26 @@ pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, 
BitcoinRegtestC pub use self::mocknet_controller::MocknetController; use super::operations::BurnchainOpSigner; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum Error { + #[error("ChainsCoordinator closed")] CoordinatorClosed, - IndexerError(burnchains::Error), + #[error("Indexer error: {0}")] + IndexerError(#[from] burnchains::Error), + #[error("Burnchain error")] BurnchainError, + #[error("Max fee rate exceeded")] MaxFeeRateExceeded, + #[error("Identical operation, not submitting")] IdenticalOperation, + #[error("No UTXOs available")] NoUTXOs, + #[error("Transaction submission failed: {0}")] TransactionSubmissionFailed(String), + #[error("Serializer error: {0}")] SerializerError(CodecError), } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), - Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), - Error::BurnchainError => write!(f, "Burnchain error"), - Error::MaxFeeRateExceeded => write!(f, "Max fee rate exceeded"), - Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUTXOs => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed(e) => { - write!(f, "Transaction submission failed: {e}") - } - Error::SerializerError(e) => write!(f, "Serializer error: {e}"), - } - } -} - -impl From for Error { - fn from(e: burnchains::Error) -> Self { - Error::IndexerError(e) - } -} - pub trait BurnchainController { fn start(&mut self, target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index b1e32c15ea..cc60f964a3 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -81,7 +81,7 @@ impl MinerStats { { commits_at_sortition.push(missed); } else { - missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]); + missed_commits_map.insert(missed.intended_sortition, vec![missed]); } } @@ -106,8 +106,7 @@ impl MinerStats { &sortition_id, )?; if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) { - missed_commits_at_height - .extend(missed_commit_in_block.into_iter().map(|x| x.clone())); + missed_commits_at_height.extend(missed_commit_in_block.into_iter().cloned()); } windowed_missed_commits.push(missed_commits_at_height); @@ -115,8 +114,7 @@ impl MinerStats { } else { // PoX reward-phase is not active debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", - burn_block_height; + "Block {burn_block_height} is in a prepare phase or post-PoX sunset, so no windowing will take place" ); assert_eq!(windowed_block_commits.len(), 1); @@ -197,19 +195,19 @@ impl MinerStats { .stderr(Stdio::piped()) .args(args); - debug!("Run: `{:?}`", &cmd); + debug!("Run: `{cmd:?}`"); let output = cmd .spawn() - .map_err(|e| format!("Failed to run `{}`: {:?}", &full_args, &e))? + .map_err(|e| format!("Failed to run `{full_args}`: {e:?}"))? 
.wait_with_output() - .map_err(|ioe| format!("Failed to run `{}`: {:?}", &full_args, &ioe))?; + .map_err(|ioe| format!("Failed to run `{full_args}`: {ioe:?}"))?; let exit_code = match output.status.code() { Some(code) => code, None => { // failed due to signal - return Err(format!("Failed to run `{}`: killed by signal", &full_args)); + return Err(format!("Failed to run `{full_args}`: killed by signal")); } }; @@ -223,11 +221,11 @@ impl MinerStats { all_miners: &[&str], ) -> Result, String> { let (exit_code, stdout, _stderr) = - Self::run_subprocess(&self.unconfirmed_commits_helper, &all_miners)?; + Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( - "Failed to run `{}`: exit code {}", - &self.unconfirmed_commits_helper, exit_code + "Failed to run `{}`: exit code {exit_code}", + &self.unconfirmed_commits_helper )); } @@ -235,9 +233,8 @@ impl MinerStats { let unconfirmed_commits: Vec = serde_json::from_slice(&stdout) .map_err(|e| { format!( - "Failed to decode output from `{}`: {:?}. Output was `{}`", + "Failed to decode output from `{}`: {e:?}. Output was `{}`", &self.unconfirmed_commits_helper, - &e, String::from_utf8_lossy(&stdout) ) })?; @@ -255,22 +252,21 @@ impl MinerStats { }; let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { - let Ok(pox_addr_bytes) = hex_bytes(&pox_addr_hex) else { - return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); + let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { + return Err(format!("Not a hex string: `{pox_addr_hex}`")); }; let Some(bitcoin_addr) = BitcoinAddress::from_scriptpubkey(BitcoinNetworkType::Mainnet, &pox_addr_bytes) else { return Err(format!( - "Not a recognized Bitcoin scriptpubkey: {}", - &pox_addr_hex + "Not a recognized Bitcoin scriptpubkey: {pox_addr_hex}" )); }; let Some(pox_addr) = PoxAddress::try_from_bitcoin_output(&BitcoinTxOutput { address: bitcoin_addr.clone(), units: 1, }) else { - return Err(format!("Not a recognized PoX address: {}", &bitcoin_addr)); + return Err(format!("Not a recognized PoX address: {bitcoin_addr}")); }; decoded_pox_addrs.push(pox_addr); } @@ -279,8 +275,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 1, parent_vtxindex: 1, key_block_ptr: 1, @@ -295,7 +291,7 @@ impl MinerStats { block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; unconfirmed_spends.push(mocked_commit); @@ -306,7 +302,7 @@ impl MinerStats { /// Convert a list of burn sample points into a probability distribution by candidate's /// apparent sender (e.g. miner address). 
pub fn burn_dist_to_prob_dist(burn_dist: &[BurnSamplePoint]) -> HashMap { - if burn_dist.len() == 0 { + if burn_dist.is_empty() { return HashMap::new(); } if burn_dist.len() == 1 { @@ -343,13 +339,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -391,7 +385,7 @@ impl MinerStats { let (dist, total_spend) = Self::get_spend_distribution( active_miners_and_commits, unconfirmed_block_commits, - &expected_pox_addrs, + expected_pox_addrs, ); let mut probs = HashMap::new(); @@ -444,8 +438,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 2, parent_vtxindex: 2, key_block_ptr: 2, @@ -455,13 +449,13 @@ impl MinerStats { burn_fee: last_commit.burn_fee, input: (last_commit.txid, expected_input_index), apparent_sender: last_commit.apparent_sender.clone(), - txid: Txid(DEADBEEF.clone()), + txid: Txid(DEADBEEF), vtxindex: 1, block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; commit_table.insert(miner.to_string(), mocked_commit); } @@ -473,13 +467,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -520,9 +512,7 @@ impl MinerStats { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id)?; for commit in commits.into_iter() { let miner = commit.apparent_sender.to_string(); - if miners.get(&miner).is_none() { - miners.insert(miner, commit); - } + miners.entry(miner).or_insert(commit); } tip = SortitionDB::get_block_snapshot(sortdb.conn(), &tip.parent_sortition_id)? 
.ok_or(DBError::NotFoundError)?; @@ -750,11 +740,11 @@ echo < { assert_eq!(spend, 2); @@ -1064,7 +1054,7 @@ EOF assert_eq!(spend, 10); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } @@ -1082,7 +1072,7 @@ EOF ] { let prob = *win_probs .get(miner) - .unwrap_or_else(|| panic!("no probability for {}", &miner)); + .unwrap_or_else(|| panic!("no probability for {miner}")); match miner.as_str() { "miner-1" => { assert!((prob - (2.0 / 25.0)).abs() < 0.00001); @@ -1097,7 +1087,7 @@ EOF assert!((prob - (10.0 / 25.0)).abs() < 0.00001); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0beed9471d..47e3baafc5 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,7 +86,10 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -106,15 +109,16 @@ pub struct ConfigFile { impl ConfigFile { pub fn from_path(path: &str) -> Result { - let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; + let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {e}"))?; let mut f = Self::from_str(&content)?; f.__path = Some(path.to_string()); Ok(f) } + #[allow(clippy::should_implement_trait)] pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = - toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; + toml::from_str(content).map_err(|e| format!("Invalid toml: {e}"))?; if let Some(mstx_balance) = config.mstx_balance.take() { warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); match config.ustx_balance { @@ -365,7 +369,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; - return config.miner; + config.miner } pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { @@ -378,7 +382,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { return self.node.clone(); }; - return config.node; + config.node } /// Apply any test settings to this burnchain config struct @@ -390,26 +394,26 @@ impl Config { if let Some(first_burn_block_height) = self.burnchain.first_burn_block_height { debug!( - "Override first_block_height from {} to {}", - burnchain.first_block_height, first_burn_block_height + "Override first_block_height from {} to {first_burn_block_height}", + burnchain.first_block_height ); burnchain.first_block_height = first_burn_block_height; } if let Some(first_burn_block_timestamp) = self.burnchain.first_burn_block_timestamp { debug!( - "Override first_block_timestamp from {} to {}", - burnchain.first_block_timestamp, first_burn_block_timestamp + "Override first_block_timestamp from {} to {first_burn_block_timestamp}", + burnchain.first_block_timestamp ); burnchain.first_block_timestamp = first_burn_block_timestamp; } if let Some(first_burn_block_hash) = &self.burnchain.first_burn_block_hash { 
debug!( - "Override first_burn_block_hash from {} to {}", - burnchain.first_block_hash, first_burn_block_hash + "Override first_burn_block_hash from {} to {first_burn_block_hash}", + burnchain.first_block_hash ); - burnchain.first_block_hash = BurnchainHeaderHash::from_hex(&first_burn_block_hash) + burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid first_burn_block_hash"); } @@ -425,8 +429,8 @@ impl Config { if let Some(v1_unlock_height) = self.burnchain.pox_2_activation { debug!( - "Override v1_unlock_height from {} to {}", - burnchain.pox_constants.v1_unlock_height, v1_unlock_height + "Override v1_unlock_height from {} to {v1_unlock_height}", + burnchain.pox_constants.v1_unlock_height ); burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } @@ -508,22 +512,22 @@ impl Config { if let Some(sunset_start) = self.burnchain.sunset_start { debug!( - "Override sunset_start from {} to {}", - burnchain.pox_constants.sunset_start, sunset_start + "Override sunset_start from {} to {sunset_start}", + burnchain.pox_constants.sunset_start ); burnchain.pox_constants.sunset_start = sunset_start.into(); } if let Some(sunset_end) = self.burnchain.sunset_end { debug!( - "Override sunset_end from {} to {}", - burnchain.pox_constants.sunset_end, sunset_end + "Override sunset_end from {} to {sunset_end}", + burnchain.pox_constants.sunset_end ); burnchain.pox_constants.sunset_end = sunset_end.into(); } // check if the Epoch 3.0 burnchain settings as configured are going to be valid. - self.check_nakamoto_config(&burnchain); + self.check_nakamoto_config(burnchain); } fn check_nakamoto_config(&self, burnchain: &Burnchain) { @@ -592,7 +596,7 @@ impl Config { match Burnchain::new(&working_dir, &self.burnchain.chain, &network_name) { Ok(burnchain) => burnchain, Err(e) => { - error!("Failed to instantiate burnchain: {}", e); + error!("Failed to instantiate burnchain: {e}"); panic!() } } @@ -610,7 +614,7 @@ impl Config { let _ = StacksEpoch::validate_epochs(epochs); // sanity check: v1_unlock_height must happen after pox-2 instantiation - let epoch21_index = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch21) + let epoch21_index = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch21) .expect("FATAL: no epoch 2.1 defined"); let epoch21 = &epochs[epoch21_index]; @@ -618,7 +622,7 @@ impl Config { assert!( v1_unlock_height > epoch21.start_height, - "FATAL: v1 unlock height occurs at or before pox-2 activation: {} <= {}\nburnchain: {:?}", v1_unlock_height, epoch21.start_height, burnchain + "FATAL: v1 unlock height occurs at or before pox-2 activation: {v1_unlock_height} <= {}\nburnchain: {burnchain:?}", epoch21.start_height ); let epoch21_rc = burnchain @@ -633,8 +637,7 @@ impl Config { // the reward cycle boundary. 
assert!( !burnchain.is_reward_cycle_start(v1_unlock_height), - "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {:?}", - burnchain + "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {burnchain:?}" ); } } @@ -676,7 +679,7 @@ impl Config { } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) } else { - Err(format!("Unknown epoch name specified: {}", epoch_name)) + Err(format!("Unknown epoch name specified: {epoch_name}")) }?; matched_epochs.push((epoch_id, configured_epoch.start_height)); } @@ -707,9 +710,7 @@ impl Config { .zip(matched_epochs.iter().map(|(epoch_id, _)| epoch_id)) { if expected_epoch != configured_epoch { - return Err(format!( - "Configured epochs may not skip an epoch. Expected epoch = {}, Found epoch = {}", - expected_epoch, configured_epoch)); + return Err(format!("Configured epochs may not skip an epoch. Expected epoch = {expected_epoch}, Found epoch = {configured_epoch}")); } } @@ -729,8 +730,8 @@ impl Config { for (i, (epoch_id, start_height)) in matched_epochs.iter().enumerate() { if epoch_id != &out_epochs[i].epoch_id { return Err( - format!("Unmatched epochs in configuration and node implementation. Implemented = {}, Configured = {}", - epoch_id, &out_epochs[i].epoch_id)); + format!("Unmatched epochs in configuration and node implementation. Implemented = {epoch_id}, Configured = {}", + &out_epochs[i].epoch_id)); } // end_height = next epoch's start height || i64::max if last epoch let end_height = if i + 1 < matched_epochs.len() { @@ -756,7 +757,7 @@ impl Config { .find(|&e| e.epoch_id == StacksEpochId::Epoch21) .ok_or("Cannot configure pox_2_activation if epoch 2.1 is not configured")?; if last_epoch.start_height > pox_2_activation as u64 { - Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {}, epoch 2.1 start height = {}", pox_2_activation, last_epoch.start_height))?; + Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. 
pox_2_activation = {pox_2_activation}, epoch 2.1 start height = {}", last_epoch.start_height))?; } } @@ -808,7 +809,7 @@ impl Config { } if burnchain.mode == "helium" && burnchain.local_mining_public_key.is_none() { - return Err(format!("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)")); + return Err("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)".into()); } let is_mainnet = burnchain.mode == "mainnet"; @@ -832,27 +833,17 @@ impl Config { burnchain.peer_version, ); } - } else { - if is_mainnet && resolve_bootstrap_nodes { - let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); - node.set_bootstrap_nodes( - bootstrap_node, - burnchain.chain_id, - burnchain.peer_version, - ); - } + } else if is_mainnet && resolve_bootstrap_nodes { + let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); + node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); } if let Some(deny_nodes) = deny_nodes { node.set_deny_nodes(deny_nodes, burnchain.chain_id, burnchain.peer_version); } // Validate the node config - if is_mainnet { - if node.use_test_genesis_chainstate == Some(true) { - return Err(format!( - "Attempted to run mainnet node with `use_test_genesis_chainstate`" - )); - } + if is_mainnet && node.use_test_genesis_chainstate == Some(true) { + return Err("Attempted to run mainnet node with `use_test_genesis_chainstate`".into()); } if node.stacker || node.miner { @@ -867,10 +858,10 @@ impl Config { let initial_balances: Vec = match config_file.ustx_balance { Some(balances) => { - if is_mainnet && balances.len() > 0 { - return Err(format!( - "Attempted to run mainnet node with specified `initial_balances`" - )); + if is_mainnet && !balances.is_empty() { + return Err( + "Attempted to run mainnet node with specified `initial_balances`".into(), + ); } balances .iter() @@ -911,16 +902,12 @@ impl Config { }; // check for observer config in env vars - match std::env::var("STACKS_EVENT_OBSERVER") { - Ok(val) => { - events_observers.insert(EventObserverConfig { - endpoint: val, - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1_000, - }); - () - } - _ => (), + if let Ok(val) = std::env::var("STACKS_EVENT_OBSERVER") { + events_observers.insert(EventObserverConfig { + endpoint: val, + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, + }); }; let connection_options = match config_file.connection_options { @@ -1068,14 +1055,11 @@ impl Config { } pub fn is_mainnet(&self) -> bool { - match self.burnchain.mode.as_str() { - "mainnet" => true, - _ => false, - } + matches!(self.burnchain.mode.as_str(), "mainnet") } pub fn is_node_event_driven(&self) -> bool { - self.events_observers.len() > 0 + !self.events_observers.is_empty() } pub fn make_nakamoto_block_builder_settings( @@ -1155,12 +1139,11 @@ impl Config { /// part dependent on the state machine getting block data back to the miner quickly, and thus /// the poll time is dependent on the first attempt time. 
pub fn get_poll_time(&self) -> u64 { - let poll_timeout = if self.node.miner { + if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { 1000 - }; - poll_timeout + } } } @@ -1251,7 +1234,7 @@ impl BurnchainConfig { username: None, password: None, timeout: 60, - magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), + magic_bytes: BLOCKSTACK_MAGIC_MAINNET, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: 10, // TODO: this is a testnet specific value. @@ -1282,22 +1265,18 @@ impl BurnchainConfig { false => "http://", }; let wallet_path = if let Some(wallet_id) = wallet.as_ref() { - format!("/wallet/{}", wallet_id) + format!("/wallet/{wallet_id}") } else { "".to_string() }; - format!( - "{}{}:{}{}", - scheme, self.peer_host, self.rpc_port, wallet_path - ) + format!("{scheme}{}:{}{wallet_path}", self.peer_host, self.rpc_port) } pub fn get_rpc_socket_addr(&self) -> SocketAddr { let mut addrs_iter = format!("{}:{}", self.peer_host, self.rpc_port) .to_socket_addrs() .unwrap(); - let sock_addr = addrs_iter.next().unwrap(); - sock_addr + addrs_iter.next().unwrap() } pub fn get_bitcoin_network(&self) -> (String, BitcoinNetworkType) { @@ -1318,15 +1297,15 @@ pub struct StacksEpochConfigFile { start_height: i64, } -pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; -pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; -pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; -pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; -pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; -pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; -pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; -pub const EPOCH_CONFIG_2_5_0: &'static str = "2.5"; -pub const EPOCH_CONFIG_3_0_0: &'static str = "3.0"; +pub const EPOCH_CONFIG_1_0_0: &str = "1.0"; +pub const EPOCH_CONFIG_2_0_0: &str = "2.0"; +pub const EPOCH_CONFIG_2_0_5: &str = "2.05"; +pub const EPOCH_CONFIG_2_1_0: &str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; +pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; +pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -1521,15 +1500,14 @@ impl BurnchainConfigFile { // Using std::net::LookupHost would be preferable, but it's // unfortunately unstable at this point. 
// https://doc.rust-lang.org/1.6.0/std/net/struct.LookupHost.html - let mut sock_addrs = format!("{}:1", &peer_host) + let mut sock_addrs = format!("{peer_host}:1") .to_socket_addrs() - .map_err(|e| format!("Invalid burnchain.peer_host: {}", &e))?; + .map_err(|e| format!("Invalid burnchain.peer_host: {e}"))?; let sock_addr = match sock_addrs.next() { Some(addr) => addr, None => { return Err(format!( - "No IP address could be queried for '{}'", - &peer_host + "No IP address could be queried for '{peer_host}'" )); } }; @@ -1726,10 +1704,7 @@ impl CostEstimatorName { if &s.to_lowercase() == "naive_pessimistic" { CostEstimatorName::NaivePessimistic } else { - panic!( - "Bad cost estimator name supplied in configuration file: {}", - s - ); + panic!("Bad cost estimator name supplied in configuration file: {s}"); } } } @@ -1741,10 +1716,7 @@ impl FeeEstimatorName { } else if &s.to_lowercase() == "fuzzed_weighted_median_fee_rate" { FeeEstimatorName::FuzzedWeightedMedianFeeRate } else { - panic!( - "Bad fee estimator name supplied in configuration file: {}", - s - ); + panic!("Bad fee estimator name supplied in configuration file: {s}"); } } } @@ -1754,7 +1726,7 @@ impl CostMetricName { if &s.to_lowercase() == "proportion_dot_product" { CostMetricName::ProportionDotProduct } else { - panic!("Bad cost metric name supplied in configuration file: {}", s); + panic!("Bad cost metric name supplied in configuration file: {s}"); } } } @@ -1924,7 +1896,7 @@ impl Default for NodeConfig { rng.fill_bytes(&mut buf); let now = get_epoch_time_ms(); - let testnet_id = format!("stacks-node-{}", now); + let testnet_id = format!("stacks-node-{now}"); let rpc_port = 20443; let p2p_port = 20444; @@ -1939,11 +1911,11 @@ impl Default for NodeConfig { NodeConfig { name: name.to_string(), seed: seed.to_vec(), - working_dir: format!("/tmp/{}", testnet_id), - rpc_bind: format!("0.0.0.0:{}", rpc_port), - p2p_bind: format!("0.0.0.0:{}", p2p_port), - data_url: format!("http://127.0.0.1:{}", rpc_port), - p2p_address: format!("127.0.0.1:{}", rpc_port), + working_dir: format!("/tmp/{testnet_id}"), + rpc_bind: format!("0.0.0.0:{rpc_port}"), + p2p_bind: format!("0.0.0.0:{p2p_port}"), + data_url: format!("http://127.0.0.1:{rpc_port}"), + p2p_address: format!("127.0.0.1:{rpc_port}"), bootstrap_node: vec![], deny_nodes: vec![], local_peer_seed: local_peer_seed.to_vec(), @@ -1976,9 +1948,8 @@ impl NodeConfig { /// Get a SocketAddr for this node's RPC endpoint which uses the loopback address pub fn get_rpc_loopback(&self) -> Option { let rpc_port = SocketAddr::from_str(&self.rpc_bind) - .or_else(|e| { + .map_err(|e| { error!("Could not parse node.rpc_bind configuration setting as SocketAddr: {e}"); - Err(()) }) .ok()? 
.port(); @@ -2032,15 +2003,12 @@ impl NodeConfig { pub fn add_bootstrap_node(&mut self, bootstrap_node: &str, chain_id: u32, peer_version: u32) { let parts: Vec<&str> = bootstrap_node.split('@').collect(); if parts.len() != 2 { - panic!( - "Invalid bootstrap node '{}': expected PUBKEY@IP:PORT", - bootstrap_node - ); + panic!("Invalid bootstrap node '{bootstrap_node}': expected PUBKEY@IP:PORT"); } let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .unwrap_or_else(|_| panic!("Invalid public key '{pubkey_str}'")); - debug!("Resolve '{}'", &hostport); + debug!("Resolve '{hostport}'"); let mut attempts = 0; let max_attempts = 5; @@ -2052,22 +2020,16 @@ impl NodeConfig { if let Some(addr) = addrs.next() { break addr; } else { - panic!("No addresses found for '{}'", hostport); + panic!("No addresses found for '{hostport}'"); } } Err(e) => { if attempts >= max_attempts { - panic!( - "Failed to resolve '{}' after {} attempts: {}", - hostport, max_attempts, e - ); + panic!("Failed to resolve '{hostport}' after {max_attempts} attempts: {e}"); } else { error!( - "Attempt {} - Failed to resolve '{}': {}. Retrying in {:?}...", + "Attempt {} - Failed to resolve '{hostport}': {e}. Retrying in {delay:?}...", attempts + 1, - hostport, - e, - delay ); thread::sleep(delay); attempts += 1; @@ -2088,8 +2050,8 @@ impl NodeConfig { peer_version: u32, ) { for part in bootstrap_nodes.split(',') { - if part.len() > 0 { - self.add_bootstrap_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_bootstrap_node(part, chain_id, peer_version); } } } @@ -2107,8 +2069,8 @@ impl NodeConfig { pub fn set_deny_nodes(&mut self, deny_nodes: String, chain_id: u32, peer_version: u32) { for part in deny_nodes.split(',') { - if part.len() > 0 { - self.add_deny_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_deny_node(part, chain_id, peer_version); } } } @@ -2122,10 +2084,7 @@ impl NodeConfig { MARFOpenOpts::new( hash_mode, - &self - .marf_cache_strategy - .as_ref() - .unwrap_or(&"noop".to_string()), + self.marf_cache_strategy.as_deref().unwrap_or("noop"), false, ) } @@ -2183,6 +2142,12 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, + /// Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block. + pub first_rejection_pause_ms: u64, + /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. + pub subsequent_rejection_pause_ms: u64, + /// Duration to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit. 
+ pub block_commit_delay: Duration, } impl Default for MinerConfig { @@ -2213,6 +2178,9 @@ impl Default for MinerConfig { max_reorg_depth: 3, pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, + first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, + subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, + block_commit_delay: Duration::from_millis(DEFAULT_BLOCK_COMMIT_DELAY_MS), } } } @@ -2264,6 +2232,7 @@ pub struct ConnectionOptionsFile { pub private_neighbors: Option, pub auth_token: Option, pub antientropy_retry: Option, + pub reject_blocks_pushed: Option, } impl ConnectionOptionsFile { @@ -2274,27 +2243,27 @@ impl ConnectionOptionsFile { public_ip_address .parse::() .map(|addr| (PeerAddress::from_socketaddr(&addr), addr.port())) - .map_err(|e| format!("Invalid connection_option.public_ip_address: {}", e)) + .map_err(|e| format!("Invalid connection_option.public_ip_address: {e}")) }) .transpose()?; let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS .read_only_call_limit .clone(); - self.read_only_call_limit_write_length.map(|x| { + if let Some(x) = self.read_only_call_limit_write_length { read_only_call_limit.write_length = x; - }); - self.read_only_call_limit_write_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_write_count { read_only_call_limit.write_count = x; - }); - self.read_only_call_limit_read_length.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_length { read_only_call_limit.read_length = x; - }); - self.read_only_call_limit_read_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_count { read_only_call_limit.read_count = x; - }); - self.read_only_call_limit_runtime.map(|x| { + } + if let Some(x) = self.read_only_call_limit_runtime { read_only_call_limit.runtime = x; - }); + }; let default = ConnectionOptions::default(); Ok(ConnectionOptions { read_only_call_limit, @@ -2345,7 +2314,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.soft_max_clients_per_host), walk_interval: self .walk_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval), walk_seed_probability: self .walk_seed_probability .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), @@ -2367,7 +2336,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.maximum_call_argument_size), download_interval: self .download_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval), inv_sync_interval: self .inv_sync_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_sync_interval), @@ -2388,7 +2357,7 @@ impl ConnectionOptionsFile { force_disconnect_interval: self.force_disconnect_interval, max_http_clients: self .max_http_clients - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients), connect_timeout: self.connect_timeout.unwrap_or(10), handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, @@ -2396,6 +2365,9 @@ impl ConnectionOptionsFile { private_neighbors: self.private_neighbors.unwrap_or(true), auth_token: self.auth_token, antientropy_retry: 
self.antientropy_retry.unwrap_or(default.antientropy_retry), + reject_blocks_pushed: self + .reject_blocks_pushed + .unwrap_or(default.reject_blocks_pushed), ..default }) } @@ -2449,7 +2421,7 @@ impl NodeConfigFile { name: self.name.unwrap_or(default_node_config.name), seed: match self.seed { Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.seed should be a hex encoded string"))?, + .map_err(|_e| "node.seed should be a hex encoded string".to_string())?, None => default_node_config.seed, }, working_dir: std::env::var("STACKS_WORKING_DIR") @@ -2463,8 +2435,9 @@ .data_url .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { - Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, + Some(seed) => hex_bytes(&seed).map_err(|_e| { + "node.local_peer_seed should be a hex encoded string".to_string() + })?, None => default_node_config.local_peer_seed, }, miner, @@ -2519,7 +2492,7 @@ .unwrap_or(default_node_config.chain_liveness_poll_time_secs), stacker_dbs: self .stacker_dbs - .unwrap_or(vec![]) + .unwrap_or_default() .iter() .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok()) .collect(), @@ -2575,6 +2548,9 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option<u64>, pub pre_nakamoto_mock_signing: Option<bool>, pub min_time_between_blocks_ms: Option<u64>, + pub first_rejection_pause_ms: Option<u64>, + pub subsequent_rejection_pause_ms: Option<u64>, + pub block_commit_delay_ms: Option<u64>, } impl MinerConfigFile { @@ -2652,7 +2628,7 @@ |txs_to_consider_str| match str::parse(txs_to_consider_str) { Ok(txtype) => txtype, Err(e) => { - panic!("could not parse '{}': {}", &txs_to_consider_str, &e); + panic!("could not parse '{txs_to_consider_str}': {e}"); } }, ) @@ -2668,7 +2644,7 @@ .map(|origin_str| match StacksAddress::from_string(origin_str) { Some(addr) => addr, None => { - panic!("could not parse '{}' into a Stacks address", origin_str); + panic!("could not parse '{origin_str}' into a Stacks address"); } }) .collect() @@ -2688,6 +2664,9 @@ } else { ms }).unwrap_or(miner_default_config.min_time_between_blocks_ms), + first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), + subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), + block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), }) } } @@ -2702,6 +2681,7 @@ pub struct AtlasConfigFile { impl AtlasConfigFile { // Can't implement `Into` trait because this takes a parameter + #[allow(clippy::wrong_self_convention)] fn into_config(&self, mainnet: bool) -> AtlasConfig { let mut conf = AtlasConfig::new(mainnet); if let Some(val) = self.attachments_max_size { @@ -3003,7 +2983,7 @@ mod tests { "#, ) .unwrap_err(); - println!("{}", err); + println!("{err}"); assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); } @@ -3024,7 +3004,7 @@ fn test_example_confs() { // For each config file in the ../conf/ directory, we should be able to parse it let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); - println!("Reading config files from: {:?}", conf_dir); + println!("Reading config files from: {conf_dir:?}"); let conf_files = fs::read_dir(conf_dir).unwrap(); for entry in conf_files { diff --git
a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index bb05cd6128..88bfc8dae7 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -181,6 +181,12 @@ impl InnerStackerDBChannel { } } +impl Default for StackerDBChannel { + fn default() -> Self { + Self::new() + } +} + impl StackerDBChannel { pub const fn new() -> Self { Self { @@ -256,7 +262,7 @@ where serializer.serialize_str(&value.to_string()) } -fn serialize_pox_addresses(value: &Vec, serializer: S) -> Result +fn serialize_pox_addresses(value: &[PoxAddress], serializer: S) -> Result where S: serde::Serializer, { @@ -374,7 +380,7 @@ impl EventObserver { } Err(err) => { // Log the error, then retry after a delay - warn!("Failed to insert payload into event observer database: {:?}", err; + warn!("Failed to insert payload into event observer database: {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -402,8 +408,8 @@ impl EventObserver { let id: i64 = row.get(0)?; let url: String = row.get(1)?; let payload_text: String = row.get(2)?; - let payload: serde_json::Value = serde_json::from_str(&payload_text) - .map_err(|e| db_error::SerializationError(e))?; + let payload: serde_json::Value = + serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; let timeout_ms: u64 = row.get(3)?; Ok((id, url, payload, timeout_ms)) }, @@ -457,7 +463,7 @@ impl EventObserver { ); let url = Url::parse(full_url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {full_url} as a URL")); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -494,8 +500,7 @@ impl EventObserver { } Err(err) => { warn!( - "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err; + "Event dispatcher: connection or request failed to {host}:{port} - {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -549,11 +554,11 @@ impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { // Construct the full URL let url_str = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) + format!("{}{path}", &self.endpoint) } else { - format!("{}/{}", &self.endpoint, path) + format!("{}/{path}", &self.endpoint) }; - let full_url = format!("http://{}", url_str); + let full_url = format!("http://{url_str}"); if let Some(db_path) = &self.db_path { let conn = @@ -604,7 +609,7 @@ impl EventObserver { .collect(); json!({ - "burn_block_hash": format!("0x{}", burn_block), + "burn_block_hash": format!("0x{burn_block}"), "burn_block_height": burn_block_height, "reward_recipients": serde_json::Value::Array(reward_recipients), "reward_slot_holders": serde_json::Value::Array(reward_slot_holders), @@ -642,7 +647,7 @@ impl EventObserver { TransactionOrigin::Burn(op) => ( op.txid().to_string(), "00".to_string(), - BlockstackOperationType::blockstack_op_to_json(&op), + BlockstackOperationType::blockstack_op_to_json(op), ), TransactionOrigin::Stacks(ref tx) => { let txid = tx.txid().to_string(); @@ -741,10 +746,10 @@ impl EventObserver { .collect(); let payload = json!({ - "parent_index_block_hash": format!("0x{}", parent_index_block_hash), + "parent_index_block_hash": format!("0x{parent_index_block_hash}"), "events": serialized_events, "transactions": serialized_txs, - "burn_block_hash": format!("0x{}", burn_block_hash), + "burn_block_hash": 
format!("0x{burn_block_hash}"), "burn_block_height": burn_block_height, "burn_block_timestamp": burn_block_timestamp, }); @@ -776,6 +781,7 @@ impl EventObserver { self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); } + #[allow(clippy::too_many_arguments)] fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, @@ -806,12 +812,15 @@ impl EventObserver { }) .collect(); - let mut tx_index: u32 = 0; let mut serialized_txs = vec![]; - for receipt in receipts.iter() { - let payload = EventObserver::make_new_block_txs_payload(receipt, tx_index); + for (tx_index, receipt) in receipts.iter().enumerate() { + let payload = EventObserver::make_new_block_txs_payload( + receipt, + tx_index + .try_into() + .expect("BUG: more receipts than U32::MAX"), + ); serialized_txs.push(payload); - tx_index += 1; } let signer_bitvec_value = signer_bitvec_opt @@ -821,7 +830,7 @@ impl EventObserver { let (reward_set_value, cycle_number_value) = match &reward_set_data { Some(data) => ( - serde_json::to_value(&RewardSetEventPayload::from_reward_set(&data.reward_set)) + serde_json::to_value(RewardSetEventPayload::from_reward_set(&data.reward_set)) .unwrap_or_default(), serde_json::to_value(data.cycle_number).unwrap_or_default(), ), @@ -835,17 +844,17 @@ impl EventObserver { "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, - "miner_txid": format!("0x{}", winner_txid), + "miner_txid": format!("0x{winner_txid}"), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), "parent_block_hash": format!("0x{}", block.parent_block_hash), - "parent_index_block_hash": format!("0x{}", parent_index_hash), + "parent_index_block_hash": format!("0x{parent_index_hash}"), "parent_microblock": format!("0x{}", block.parent_microblock_hash), "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, - "parent_burn_block_hash": format!("0x{}", parent_burn_block_hash), + "parent_burn_block_hash": format!("0x{parent_burn_block_hash}"), "parent_burn_block_height": parent_burn_block_height, "parent_burn_block_timestamp": parent_burn_block_timestamp, "anchored_cost": anchored_consumed, @@ -1097,6 +1106,12 @@ impl BlockEventDispatcher for EventDispatcher { } } +impl Default for EventDispatcher { + fn default() -> Self { + EventDispatcher::new() + } +} + impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { @@ -1125,7 +1140,7 @@ impl EventDispatcher { ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.burn_block_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1149,6 +1164,7 @@ impl EventDispatcher { /// - dispatch_matrix: a vector where each index corresponds to the hashset of event indexes /// that each respective event observer is subscribed to /// - events: a vector of all events from all the tx receipts + #[allow(clippy::type_complexity)] fn create_dispatch_matrix_and_event_vector<'a>( &self, receipts: &'a Vec, @@ -1241,6 +1257,7 @@ impl EventDispatcher { (dispatch_matrix, events) } + #[allow(clippy::too_many_arguments)] pub fn process_chain_tip( &self, block: &StacksBlockEventData, @@ -1264,7 +1281,7 @@ impl EventDispatcher { let all_receipts = receipts.to_owned(); let 
(dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); - if dispatch_matrix.len() > 0 { + if !dispatch_matrix.is_empty() { let mature_rewards_vec = if let Some(rewards_info) = mature_rewards_info { mature_rewards .iter() @@ -1297,7 +1314,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - &block, + block, metadata, receipts, parent_index_hash, @@ -1342,7 +1359,7 @@ impl EventDispatcher { ) }) .collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let flattened_receipts = processed_unconfirmed_state @@ -1390,12 +1407,12 @@ impl EventDispatcher { .enumerate() .filter_map(|(obs_id, observer)| { let lookup_ix = u16::try_from(obs_id).expect("FATAL: more than 2^16 observers"); - if lookup.contains(&lookup_ix) { - return Some(observer); - } else if include_any && self.any_event_observers_lookup.contains(&lookup_ix) { - return Some(observer); + if lookup.contains(&lookup_ix) + || (include_any && self.any_event_observers_lookup.contains(&lookup_ix)) + { + Some(observer) } else { - return None; + None } }) .collect() @@ -1405,7 +1422,7 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1427,7 +1444,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1456,7 +1473,7 @@ impl EventDispatcher { ) { let interested_observers = self.filter_observers(&self.mined_microblocks_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1483,7 +1500,7 @@ impl EventDispatcher { tx_events: Vec, ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1502,7 +1519,7 @@ impl EventDispatcher { block_size: block_size_bytes, cost: consumed.clone(), tx_events, - miner_signature: block.header.miner_signature.clone(), + miner_signature: block.header.miner_signature, signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1522,8 +1539,7 @@ impl EventDispatcher { modified_slots: Vec, ) { debug!( - "event_dispatcher: New StackerDB chunk events for {}: {:?}", - contract_id, modified_slots + "event_dispatcher: New StackerDB chunk events for {contract_id}: {modified_slots:?}" ); let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); @@ -1558,13 +1574,13 @@ impl EventDispatcher { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let dropped_txids: Vec<_> = txs .into_iter() - .map(|tx| serde_json::Value::String(format!("0x{}", &tx))) + .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); let payload = json!({ @@ -1577,9 +1593,9 @@ impl EventDispatcher { } } - pub fn process_new_attachments(&self, attachments: &Vec<(AttachmentInstance, Attachment)>) { + pub fn process_new_attachments(&self, attachments: &[(AttachmentInstance, 
Attachment)]) { let interested_observers: Vec<_> = self.registered_observers.iter().enumerate().collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1598,7 +1614,7 @@ impl EventDispatcher { &self, asset_identifier: &AssetIdentifier, event_index: usize, - dispatch_matrix: &mut Vec>, + dispatch_matrix: &mut [HashSet], ) { if let Some(observer_indexes) = self.assets_observers_lookup.get(asset_identifier) { for o_i in observer_indexes { @@ -1857,8 +1873,7 @@ mod test { // Assert that the connection attempt timed out assert!( result.is_err(), - "Expected a timeout error, but got {:?}", - result + "Expected a timeout error, but got {result:?}" ); assert_eq!( result.unwrap_err().kind(), @@ -2116,7 +2131,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let request = server.recv().unwrap(); assert_eq!(request.url(), "/test"); @@ -2131,7 +2146,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2150,7 +2165,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; while let Ok(request) = server.recv() { @@ -2180,7 +2195,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2200,7 +2215,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2224,7 +2239,7 @@ mod test { } }); - let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); + let observer = EventObserver::new(None, format!("127.0.0.1:{port}"), timeout); let payload = json!({"key": "value"}); @@ -2237,7 +2252,7 @@ mod test { // Record the time after the function returns let elapsed_time = start_time.elapsed(); - println!("Elapsed time: {:?}", elapsed_time); + println!("Elapsed time: {elapsed_time:?}"); assert!( elapsed_time >= timeout, "Expected a timeout, but the function returned too quickly" @@ -2263,9 +2278,9 @@ mod test { // Set up a channel to notify when the server has processed the request let (tx, rx) = channel(); - info!("Starting mock server on port {}", port); + info!("Starting mock server on port {port}"); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2316,7 +2331,7 @@ mod test { let observer = EventObserver::new( Some(working_dir.clone()), - format!("127.0.0.1:{}", port), + format!("127.0.0.1:{port}"), timeout, ); diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b1ddf2e82b..c285c6a168 100644 
--- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -23,6 +23,7 @@ use crate::TipCandidate; pub type NeonGlobals = Globals; /// Command types for the relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -99,6 +100,7 @@ impl Clone for Globals { } impl Globals { + #[allow(clippy::too_many_arguments)] pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, @@ -282,15 +284,14 @@ impl Globals { **leader_key_registration_state { info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid + "Received burnchain block #{burn_block_height} including key_register_op - {txid}" ); if txid == op.txid { let active_key = RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: op.block_height, + op_vtxindex: op.vtxindex, memo: op.memo, }; @@ -300,8 +301,8 @@ impl Globals { activated_key = Some(active_key); } else { debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid + "key_register_op {txid} does not match our pending op {}", + &op.txid ); } } @@ -450,10 +451,7 @@ impl Globals { /// Clear the initiative flag and return its value pub fn take_initiative(&self) -> Option { match self.initiative.lock() { - Ok(mut initiative) => { - let ret = (*initiative).take(); - ret - } + Ok(mut initiative) => (*initiative).take(), Err(_e) => { error!("FATAL: failed to lock initiative"); panic!(); diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index b6df8549c4..4e85750880 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -123,10 +123,7 @@ impl Keychain { let proof = VRF::prove(&sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(&pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); proof } @@ -178,7 +175,7 @@ impl Keychain { } /// Sign a transaction as if we were the origin - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let sk = self.get_secret_key(); tx_signer .sign_origin(&sk) @@ -333,7 +330,7 @@ mod tests { } }; sk.set_compress_public(true); - self.microblocks_secret_keys.push(sk.clone()); + self.microblocks_secret_keys.push(sk); debug!("Microblock keypair rotated"; "burn_block_height" => %burn_block_height, @@ -346,7 +343,7 @@ mod tests { self.microblocks_secret_keys.last().cloned() } - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.secret_keys.len() < self.threshold as usize { self.secret_keys.len() } else { @@ -364,18 +361,15 @@ mod tests { let vrf_sk = match self.vrf_map.get(vrf_pk) { Some(vrf_pk) => vrf_pk, None => { - warn!("No VRF secret key on file for {:?}", vrf_pk); + warn!("No VRF secret key on file for {vrf_pk:?}"); return None; } }; // Generate the proof - let proof = VRF::prove(&vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(vrf_pk, &proof, 
bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); Some(proof) } @@ -385,7 +379,7 @@ let public_keys = self .secret_keys .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) + .map(StacksPublicKey::from_private) .collect(); let version = if is_mainnet { self.hash_mode.to_version_mainnet() @@ -518,7 +512,7 @@ TransactionVersion::Testnet, k1.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), @@ -527,7 +521,7 @@ TransactionVersion::Testnet, k2.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index fcdc9f5847..4fa1c5e5a7 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -63,11 +63,11 @@ static GLOBAL: Jemalloc = Jemalloc; /// Implementation of `pick_best_tip` CLI option fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option<u64>) -> TipCandidate { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -93,21 +93,21 @@ at_stacks_height, ); - let best_tip = BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap(); - best_tip + BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap() } /// Implementation of `get_miner_spend` CLI option +#[allow(clippy::incompatible_msrv)] fn cli_get_miner_spend( config_path: &str, mine_start: Option<u64>, at_burnchain_height: Option<u64>, ) -> u64 { - info!("Loading config at path {}", config_path); - let config = match ConfigFile::from_path(&config_path) { + info!("Loading config at path {config_path}"); + let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -155,7 +155,7 @@ &config, &keychain, &burnchain, - &mut sortdb, + &sortdb, &commit_outs, mine_start.unwrap_or(tip.block_height), at_burnchain_height, @@ -171,7 +171,7 @@ else { return 0.0; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return 0.0; } @@ -181,7 +181,7 @@ .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) @@ -195,10 +195,7 @@ .map(|cmt| (format!("{}", &cmt.apparent_sender), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution(
&active_miners_and_commits, @@ -207,12 +204,11 @@ fn cli_get_miner_spend( ); let win_probs = if config.miner.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, &commit_outs, - ); - win_probs + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -229,14 +225,13 @@ fn cli_get_miner_spend( return 0.0; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - config.miner.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + config.miner.fast_rampup ); let miner_addrs = BlockMinerThread::get_miner_addrs(&config, &keychain); @@ -247,8 +242,8 @@ fn cli_get_miner_spend( .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); win_prob }, @@ -259,9 +254,9 @@ fn cli_get_miner_spend( fn main() { panic::set_hook(Box::new(|panic_info| { - error!("Process abort due to thread panic: {}", panic_info); + error!("Process abort due to thread panic: {panic_info}"); let bt = Backtrace::new(); - error!("Panic backtrace: {:?}", &bt); + error!("Panic backtrace: {bt:?}"); // force a core dump #[cfg(unix)] @@ -289,10 +284,7 @@ fn main() { .expect("Failed to parse --mine-at-height argument"); if let Some(mine_start) = mine_start { - info!( - "Will begin mining once Stacks chain has synced to height >= {}", - mine_start - ); + info!("Will begin mining once Stacks chain has synced to height >= {mine_start}"); } let config_file = match subcommand.as_str() { @@ -315,14 +307,14 @@ fn main() { "check-config" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config_file = match ConfigFile::from_path(&config_path) { Ok(config_file) => { - debug!("Loaded config file: {:?}", config_file); + debug!("Loaded config file: {config_file:?}"); config_file } Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -332,7 +324,7 @@ fn main() { process::exit(0); } Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -340,11 +332,11 @@ fn main() { "start" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); match ConfigFile::from_path(&config_path) { Ok(config_file) => config_file, Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } } @@ -391,7 +383,7 @@ fn main() { args.finish(); let best_tip = cli_pick_best_tip(&config_path, at_stacks_height); - println!("Best tip is {:?}", &best_tip); + println!("Best tip is {best_tip:?}"); process::exit(0); } "get-spend-amount" => { @@ -401,7 +393,7 @@ fn main() { args.finish(); let spend_amount = cli_get_miner_spend(&config_path, mine_start, at_burnchain_height); - println!("Will spend {}", spend_amount); + println!("Will spend 
{spend_amount}"); process::exit(0); } _ => { @@ -413,7 +405,7 @@ fn main() { let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -427,8 +419,7 @@ fn main() { if conf.burnchain.mode == "helium" || conf.burnchain.mode == "mocknet" { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { - warn!("Helium runloop exited: {}", e); - return; + warn!("Helium runloop exited: {e}"); } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git a/testnet/stacks-node/src/monitoring/prometheus.rs b/testnet/stacks-node/src/monitoring/prometheus.rs index e9705142d0..f91ac53bb4 100644 --- a/testnet/stacks-node/src/monitoring/prometheus.rs +++ b/testnet/stacks-node/src/monitoring/prometheus.rs @@ -20,10 +20,7 @@ pub fn start_serving_prometheus_metrics(bind_address: String) -> Result<(), Moni warn!("Prometheus monitoring: unable to get local bind address, will not spawn prometheus endpoint service."); MonitoringError::UnableToGetAddress })?; - info!( - "Prometheus monitoring: server listening on http://{}", - local_addr - ); + info!("Prometheus monitoring: server listening on http://{local_addr}"); let mut incoming = listener.incoming(); while let Some(stream) = incoming.next().await { diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7cda49e10d..edaf12e98b 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -47,7 +47,7 @@ pub mod sign_coordinator; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; -pub const RELAYER_MAX_BUFFER: usize = 100; +pub const RELAYER_MAX_BUFFER: usize = 1; const VRF_MOCK_MINER_KEY: u64 = 1; pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB @@ -131,7 +131,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -148,7 +148,7 @@ impl StacksNode { let burnchain = runloop.get_burnchain(); let atlas_config = config.atlas.clone(); let mut keychain = Keychain::default(config.node.seed.clone()); - if let Some(mining_key) = config.miner.mining_key.clone() { + if let Some(mining_key) = config.miner.mining_key { keychain.set_nakamoto_sk(mining_key); } @@ -195,7 +195,7 @@ impl StacksNode { match &data_from_neon.leader_key_registration_state { LeaderKeyRegistrationState::Active(registered_key) => { let pubkey_hash = keychain.get_nakamoto_pkh(); - if pubkey_hash.as_ref() == ®istered_key.memo { + if pubkey_hash.as_ref() == registered_key.memo { data_from_neon.leader_key_registration_state } else { LeaderKeyRegistrationState::Inactive @@ -308,13 +308,13 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, 
op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash ); } } @@ -359,25 +359,25 @@ impl StacksNode { } pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { - info!("Activated VRF key; saving to {}", path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); return; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return; } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + if let Err(e) = f.write_all(key_json.as_bytes()) { + warn!("Failed to write activated VRF key to {path}: {e:?}"); return; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index a08c0ab353..411e4f3be8 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -67,6 +67,7 @@ pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync:: /// miner thread sleep before trying again? const ABORT_TRY_AGAIN_MS: u64 = 200; +#[allow(clippy::large_enum_variant)] pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { @@ -108,6 +109,8 @@ pub enum MinerReason { /// sortition. burn_view_consensus_hash: ConsensusHash, }, + /// The miner thread was spawned to initialize a prior empty tenure + EmptyTenure, } impl std::fmt::Display for MinerReason { @@ -120,6 +123,7 @@ impl std::fmt::Display for MinerReason { f, "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}", ), + MinerReason::EmptyTenure => write!(f, "EmptyTenure"), } } } @@ -283,6 +287,7 @@ impl BlockMinerThread { } let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; + let mut last_block_rejected = false; // now, actually run this tenure loop { @@ -354,7 +359,7 @@ impl BlockMinerThread { // try again, in case a new sortition is pending self.globals - .raise_initiative(format!("MiningFailure: {:?}", &e)); + .raise_initiative(format!("MiningFailure: {e:?}")); return Err(NakamotoNodeError::MiningFailure( ChainstateError::MinerAborted, )); @@ -386,15 +391,25 @@ impl BlockMinerThread { return Err(e); } _ => { - error!("Error while gathering signatures: {e:?}. Will try mining again."; + // Sleep for a bit to allow signers to catch up + let pause_ms = if last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; continue; } }, }; + last_block_rejected = false; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { @@ -413,7 +428,7 @@ impl BlockMinerThread { // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); - if !self.last_block_mined.is_none() { + if self.last_block_mined.is_some() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } @@ -562,12 +577,12 @@ impl BlockMinerThread { &self.burnchain, &sort_db, &mut chain_state, - &stackerdbs, + stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, )?; - return Ok((reward_set, signature)); + Ok((reward_set, signature)) } /// Fault injection -- possibly fail to broadcast @@ -579,13 +594,12 @@ impl BlockMinerThread { .fault_injection_block_push_fail_probability .unwrap_or(0) .min(100); - let will_drop = if drop_prob > 0 { + if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob } else { false - }; - will_drop + } } /// Store a block to the chainstate, and if successful (it should be since we mined it), @@ -610,7 +624,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &chainstate_config, - &block, + block, &mut sortition_handle, &staging_tx, headers_conn, @@ -637,14 +651,14 @@ impl BlockMinerThread { } let block_id = block.block_id(); - debug!("Broadcasting block {}", &block_id); + debug!("Broadcasting block {block_id}"); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block.clone()], }), ) { - warn!("Failed to broadcast block {}: {:?}", &block_id, &e); + warn!("Failed to broadcast block {block_id}: {e:?}"); } Ok(()) } @@ -693,7 +707,7 @@ impl BlockMinerThread { miner_privkey, &sort_db, &self.burn_block, - &stackerdbs, + stackerdbs, SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, @@ -790,7 +804,7 @@ impl BlockMinerThread { // load up stacks chain tip let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { - error!("Failed to load canonical Stacks tip: {:?}", &e); + error!("Failed to load canonical Stacks tip: {e:?}"); NakamotoNodeError::ParentNotFound })?; @@ -802,8 +816,8 @@ impl BlockMinerThread { ) .map_err(|e| { error!( - "Could not query header info for tenure tip {} off of {}: {:?}", - &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + "Could not query header info for tenure tip {} off of {stacks_tip_block_id}: {e:?}", + &self.burn_election_block.consensus_hash ); NakamotoNodeError::ParentNotFound })?; @@ -831,8 +845,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header for parent tenure ID {}: {:?}", - &self.parent_tenure_id, &e + "Could not query header for parent tenure ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? 
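[Editor's note] The hunk above is the behavioral core of the `first_rejection_pause_ms` / `subsequent_rejection_pause_ms` change: instead of immediately re-proposing after signers reject a block, the miner now sleeps, with a short pause on the first rejection and a longer one on repeats (the `last_block_rejected` flag is cleared once signatures are gathered). A minimal, self-contained sketch of that selection logic; `RejectionPauses` is a stand-in for the relevant `MinerConfig` fields, and the values in `main` are examples, not necessarily the shipped defaults:

```rust
use std::time::Duration;

/// Stand-in for the two pause knobs added to MinerConfig in this diff;
/// the real struct has many more fields.
struct RejectionPauses {
    first_rejection_pause_ms: u64,
    subsequent_rejection_pause_ms: u64,
}

/// Select the pause applied after a failed signature-gathering round,
/// mirroring the branch in the miner loop above: the first rejection of
/// a block gets the short pause, repeated rejections get the longer one.
fn rejection_pause(cfg: &RejectionPauses, last_block_rejected: bool) -> Duration {
    let pause_ms = if last_block_rejected {
        cfg.subsequent_rejection_pause_ms
    } else {
        cfg.first_rejection_pause_ms
    };
    Duration::from_millis(pause_ms)
}

fn main() {
    // Example values only, not necessarily the shipped defaults.
    let cfg = RejectionPauses {
        first_rejection_pause_ms: 5_000,
        subsequent_rejection_pause_ms: 10_000,
    };
    assert_eq!(rejection_pause(&cfg, false), Duration::from_millis(5_000));
    assert_eq!(rejection_pause(&cfg, true), Duration::from_millis(10_000));
}
```

The two-tier design gives signers a brief window to converge after a first rejection without stalling the miner, while repeated rejections back off harder.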
@@ -847,7 +861,7 @@ impl BlockMinerThread { &parent_tenure_header.consensus_hash, ) .map_err(|e| { - error!("Could not query parent tenure finish block: {:?}", &e); + error!("Could not query parent tenure finish block: {e:?}"); NakamotoNodeError::ParentNotFound })?; if let Some(header) = header_opt { @@ -858,31 +872,27 @@ impl BlockMinerThread { "Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id ); - let epoch2_header = - NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "No header info for epoch2x tenure block ID {}", - &self.parent_tenure_id - ); - NakamotoNodeError::ParentNotFound - })?; - - epoch2_header + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {e:?}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? } }; debug!( - "Miner: stacks tip parent header is {} {:?}", - &stacks_tip_header.index_block_hash(), - &stacks_tip_header + "Miner: stacks tip parent header is {} {stacks_tip_header:?}", + &stacks_tip_header.index_block_hash() ); let miner_address = self .keychain @@ -914,19 +924,19 @@ impl BlockMinerThread { let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, - self.burn_block.sortition_hash.as_bytes(), + self.burn_election_block.sortition_hash.as_bytes(), ) } else { self.keychain.generate_proof( self.registered_key.target_block_height, - self.burn_block.sortition_hash.as_bytes(), + self.burn_election_block.sortition_hash.as_bytes(), ) }; debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), - &self.burn_block.sortition_hash, + &self.burn_election_block.sortition_hash, &self.burn_block.block_height, &self.burn_block.burn_header_hash, &self.registered_key.vrf_public_key.to_hex() @@ -966,8 +976,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) .map_err(|e| { error!( - "Could not query header info for parent block ID {}: {:?}", - &x.header.parent_block_id, &e + "Could not query header info for parent block ID {}: {e:?}", + &x.header.parent_block_id ); NakamotoNodeError::ParentNotFound })? 
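[Editor's note] One substantive change in the hunk above is easy to miss among the formatting cleanups: the VRF proof is now generated over `self.burn_election_block.sortition_hash` rather than `self.burn_block.sortition_hash`, i.e. over the sortition that actually elected this miner. For the prove/verify pairing used here and in the keychain changes earlier, a minimal sketch; the module path and constructor names are assumptions of this sketch based on how the keychain code uses the API, not verified signatures:

```rust
// Module path and constructors are assumed from the keychain usage above.
use stacks_common::util::vrf::{VRFPrivateKey, VRFPublicKey, VRF};

/// Prove over a sortition hash and then verify, the same round-trip the
/// keychain performs as a sanity check. A verification error is folded
/// into `false` rather than propagated, exactly as in the diff's
/// `VRF::verify(...).unwrap_or(false)`.
fn prove_and_check(sortition_hash_bytes: &[u8]) -> bool {
    let sk = VRFPrivateKey::new();
    let pk = VRFPublicKey::from_private(&sk);
    // The miner seeds this with the *election* block's sortition hash.
    let proof = VRF::prove(&sk, sortition_hash_bytes);
    VRF::verify(&pk, &proof, sortition_hash_bytes).unwrap_or(false)
}
```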
@@ -1136,9 +1146,9 @@ impl BlockMinerThread { let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { - tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), + tenure_consensus_hash: self.burn_election_block.consensus_hash, prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), + burn_view_consensus_hash: self.burn_election_block.consensus_hash, previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), @@ -1147,7 +1157,7 @@ impl BlockMinerThread { }; let (tenure_change_tx, coinbase_tx) = match &self.reason { - MinerReason::BlockFound => { + MinerReason::BlockFound | MinerReason::EmptyTenure => { let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; let coinbase_tx = @@ -1241,7 +1251,7 @@ impl ParentStacksBlockInfo { } let Ok(Some(parent_tenure_header)) = - NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + NakamotoChainState::get_block_header(chain_state.db(), parent_tenure_id) else { warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); return Err(NakamotoNodeError::ParentNotFound); @@ -1282,7 +1292,7 @@ impl ParentStacksBlockInfo { } else { 1 }; - let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash; Some(ParentTenureInfo { parent_tenure_blocks, parent_tenure_consensus_hash, @@ -1310,7 +1320,7 @@ impl ParentStacksBlockInfo { let account = chain_state .with_read_only_clarity_tx( &burn_db - .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .index_handle_at_block(chain_state, &stacks_tip_header.index_block_hash()) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 004023ea26..3c4e6a98f4 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; use std::thread; @@ -54,11 +53,9 @@ pub struct PeerThread { chainstate: StacksChainState, /// handle to the mempool DB mempool: MemPoolDB, - /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet - /// (i.e. due to backpressure). We track this separately, instead of just using a bigger - /// channel, because we need to know when backpressure occurs in order to throttle the p2p - /// thread's downloader. - results_with_data: VecDeque, + /// Buffered network result relayer command. + /// P2P network results are consolidated into a single directive. + results_with_data: Option, /// total number of p2p state-machine passes so far. 
Used to signal when to download the next /// reward cycle of blocks num_p2p_state_machine_passes: u64, @@ -199,7 +196,7 @@ impl PeerThread { sortdb, chainstate, mempool, - results_with_data: VecDeque::new(), + results_with_data: None, num_p2p_state_machine_passes: 0, num_inv_sync_passes: 0, num_download_passes: 0, @@ -227,6 +224,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub(crate) fn run_one_pass( &mut self, indexer: &B, @@ -238,12 +236,22 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = self + .results_with_data + .as_ref() + .map(|res| { + if let RelayerDirective::HandleNetResult(netres) = &res { + netres.has_block_data_to_store() + } else { + false + } + }) + .unwrap_or(false); + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.net.has_more_downloads() ); 1 @@ -258,7 +266,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. let handler_args = RPCHandlerArgs { - exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -266,7 +274,6 @@ impl PeerThread { cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), coord_comms: Some(&self.globals.coord_comms), - ..RPCHandlerArgs::default() }; self.net.run( indexer, @@ -282,7 +289,6 @@ impl PeerThread { }; match p2p_res { Ok(network_result) => { - let mut have_update = false; if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { // p2p state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_p2p_state_pass(); @@ -293,52 +299,51 @@ impl PeerThread { // inv-sync state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_inv_sync_pass(); self.num_inv_sync_passes = network_result.num_inv_sync_passes; - - // the relayer cares about the number of inventory passes, so pass this along - have_update = true; } if self.num_download_passes < network_result.num_download_passes { // download state-machine did a full pass. Notify anyone listening. 
self.globals.sync_comms.notify_download_pass(); self.num_download_passes = network_result.num_download_passes; - - // the relayer cares about the number of download passes, so pass this along - have_update = true; } - if network_result.has_data_to_store() - || self.last_burn_block_height != network_result.burn_height - || have_update - { - // pass along if we have blocks, microblocks, or transactions, or a status - // update on the network's view of the burnchain - self.last_burn_block_height = network_result.burn_height; - self.results_with_data - .push_back(RelayerDirective::HandleNetResult(network_result)); + self.last_burn_block_height = network_result.burn_height; + if let Some(res) = self.results_with_data.take() { + if let RelayerDirective::HandleNetResult(netres) = res { + let new_res = netres.update(network_result); + self.results_with_data = Some(RelayerDirective::HandleNetResult(new_res)); + } + } else { + self.results_with_data = + Some(RelayerDirective::HandleNetResult(network_result)); } + + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; - while let Some(next_result) = self.results_with_data.pop_front() { + if let Some(next_result) = self.results_with_data.take() { // have blocks, microblocks, and/or transactions (don't care about anything else), // or a directive to mine microblocks + self.globals.raise_initiative( + "PeerThread::run_one_pass() with backlogged network results".to_string(), + ); if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( - "P2P: {:?}: download backpressure detected (bufferred {})", + "P2P: {:?}: download backpressure detected", &self.net.local_peer, - self.results_with_data.len() ); match e { TrySendError::Full(directive) => { // don't lose this data -- just try it again - self.results_with_data.push_front(directive); - break; + self.results_with_data = Some(directive); } TrySendError::Disconnected(_) => { info!("P2P: Relayer hang up with p2p channel"); @@ -347,13 +352,7 @@ impl PeerThread { } } } else { - debug!( - "P2P: Dispatched result to Relayer! {} results remaining", - self.results_with_data.len() - ); - self.globals.raise_initiative( - "PeerThread::run_one_pass() with data-bearing network result".to_string(), - ); + debug!("P2P: Dispatched result to Relayer!",); } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..7c8dc6f2c5 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -67,6 +67,7 @@ use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; /// Command types for the Nakamoto relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -142,7 +143,7 @@ impl LastCommit { /// What's the parent tenure's tenure-start block hash? pub fn parent_tenure_id(&self) -> StacksBlockId { - StacksBlockId(self.block_commit.block_header_hash.clone().0) + StacksBlockId(self.block_commit.block_header_hash.0) } /// What's the stacks tip at the time of commit? 
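The peer.rs changes above replace the old backpressure queue with a single consolidated slot: each new `NetworkResult` is folded into any still-pending one via `NetworkResult::update` instead of queueing behind it. Below is a minimal sketch of that merge-or-store pattern, using a hypothetical `NetSummary` type and `merge` method as stand-ins for `NetworkResult` and its `update` (whose real merge semantics live in stackslib):

```rust
// Merge-or-store buffering: at most one pending directive at a time.
// `NetSummary` and `merge` are illustrative stand-ins, not the real types.
#[derive(Debug)]
struct NetSummary {
    burn_height: u64,
    blocks: Vec<String>,
}

impl NetSummary {
    /// Fold a newer result into this one: keep the latest burn height,
    /// accumulate any block data that still needs to be stored.
    fn merge(mut self, newer: NetSummary) -> NetSummary {
        self.burn_height = newer.burn_height;
        self.blocks.extend(newer.blocks);
        self
    }
}

fn buffer_result(slot: &mut Option<NetSummary>, incoming: NetSummary) {
    // Either merge into the pending result or occupy the empty slot, so the
    // relayer channel only ever sees one consolidated directive.
    *slot = Some(match slot.take() {
        Some(pending) => pending.merge(incoming),
        None => incoming,
    });
}

fn main() {
    let mut slot = None;
    buffer_result(&mut slot, NetSummary { burn_height: 100, blocks: vec!["a".into()] });
    buffer_result(&mut slot, NetSummary { burn_height: 101, blocks: vec!["b".into()] });
    let pending = slot.expect("slot holds the consolidated result");
    assert_eq!(pending.burn_height, 101);
    assert_eq!(pending.blocks.len(), 2);
}
```

Backpressure detection then reduces to asking whether the one buffered slot still holds block data, rather than measuring a queue length.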
@@ -167,7 +168,7 @@ impl LastCommit { /// Set our txid pub fn set_txid(&mut self, txid: &Txid) { - self.txid = Some(txid.clone()); + self.txid = Some(*txid); } } @@ -235,6 +236,8 @@ pub struct RelayerThread { /// Information about the last-sent block commit, and the relayer's view of the chain at the /// time it was sent. last_committed: Option<LastCommit>, + /// Timeout for waiting for the first block in a tenure before submitting a block commit + new_tenure_timeout: Option<Instant>, } impl RelayerThread { @@ -292,6 +295,7 @@ impl RelayerThread { is_miner, next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, + new_tenure_timeout: None, } } @@ -304,9 +308,7 @@ /// chain tip? fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all @@ -472,7 +474,7 @@ .expect("FATAL: failed to query sortition DB"); if cur_sn.consensus_hash != consensus_hash { - info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash); + info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash); self.globals .raise_initiative("process_sortition".to_string()); return Ok(None); @@ -497,7 +499,7 @@ BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: miner_pkh.as_bytes().to_vec(), - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -564,19 +566,17 @@ let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), &stacks_tip, - &tip_block_ch, + tip_block_ch, ) .map_err(|e| { error!( - "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", - &stacks_tip, &e + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}" ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { error!( - "Relayer: Failed to find tenure-start block header for stacks tip {}", - &stacks_tip + "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}" ); NakamotoNodeError::ParentNotFound })?; @@ -589,17 +589,11 @@ tip_block_ch, ) .map_err(|e| { - error!( - "Failed to load VRF proof for {} off of {}: {:?}", - tip_block_ch, &stacks_tip, &e - ); + error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}"); NakamotoNodeError::ParentNotFound })?
.ok_or_else(|| { - error!( - "No block VRF proof for {} off of {}", - tip_block_ch, &stacks_tip - ); + error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}"); NakamotoNodeError::ParentNotFound })?; @@ -612,7 +606,7 @@ impl RelayerThread { &self.burnchain, ) .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; @@ -730,9 +724,7 @@ impl RelayerThread { /// * last_burn_block corresponds to the canonical sortition DB's chain tip /// * the time of issuance is sufficiently recent /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) + /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed) /// * a miner thread is not running already fn create_block_miner( &mut self, @@ -750,16 +742,15 @@ impl RelayerThread { return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = burn_tip.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return Err(NakamotoNodeError::MissedMiningOpportunity); @@ -819,14 +810,14 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { - info!("Miner thread failed: {:?}", &e); + info!("Miner thread failed: {e:?}"); Err(e) } else { Ok(()) } }) .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); + error!("Relayer: Failed to start tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; debug!( @@ -852,7 +843,7 @@ impl RelayerThread { .name(format!("tenure-stop-{}", self.local_peer.data_url)) .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) .map_err(|e| { - error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; @@ -883,23 +874,14 @@ impl RelayerThread { SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - let block_election_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? 
- .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; let Some(ref mining_key) = self.config.miner.mining_key else { return Ok(()); }; let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - let last_winner_snapshot = { + // If we won the last sortition, then we should start a new tenure off of it. + let last_block_election_snapshot = { let ih = self.sortdb.index_handle(&burn_tip.sortition_id); ih.get_last_snapshot_with_sortition(burn_tip.block_height) .map_err(|e| { @@ -908,15 +890,15 @@ })? }; - let won_last_sortition = last_winner_snapshot.miner_pk_hash == Some(mining_pkh); + let won_last_sortition = last_block_election_snapshot.miner_pk_hash == Some(mining_pkh); debug!( "Relayer: Current burn block had no sortition. Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, + "last_block_election_snapshot.consensus_hash" => %last_block_election_snapshot.consensus_hash, + "last_block_election_snapshot.miner_pk_hash" => ?last_block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "block_election_ch" => %block_election_snapshot.consensus_hash, "burn_view_ch" => %new_burn_view, ); @@ -924,13 +906,43 @@ return Ok(()); } + let canonical_block_snapshot = + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + let won_canonical_block_snapshot = + canonical_block_snapshot.miner_pk_hash == Some(mining_pkh); + + let (parent_tenure_start, block_election_snapshot, reason) = + if !won_canonical_block_snapshot { + debug!("Relayer: Failed to issue a tenure change payload in our last tenure. Issuing a new tenure change payload."); + ( + StacksBlockId(last_block_election_snapshot.winning_stacks_block_hash.0), + last_block_election_snapshot, + MinerReason::EmptyTenure, + ) + } else { + debug!("Relayer: Successfully issued a tenure change payload in our last tenure. Issuing a tenure extend from the chain tip."); + ( + canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip + canonical_block_snapshot, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) + }; match self.start_new_tenure( - canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip + parent_tenure_start, block_election_snapshot, burn_tip, - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - }, + reason, ) { Ok(()) => { debug!("Relayer: successfully started new tenure."); @@ -955,7 +967,7 @@ return true; } Err(e) => { - warn!("Relayer: process_sortition returned {:?}", &e); + warn!("Relayer: process_sortition returned {e:?}"); return false; } }; @@ -1003,13 +1015,7 @@ #[cfg(test)] fn fault_injection_skip_block_commit(&self) -> bool { - self.globals - .counters - .naka_skip_commit_op - .0 - .lock() - .unwrap() - .unwrap_or(false) + self.globals.counters.naka_skip_commit_op.get() } #[cfg(not(test))] @@ -1033,14 +1039,13 @@ let (cur_stacks_tip_ch, cur_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( |e| { - panic!("Failed to load canonical stacks tip: {:?}", &e); + panic!("Failed to load canonical stacks tip: {e:?}"); }, ); if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { info!( - "Stacks tip changed prior to commit: {}/{} != {}/{}", - &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + "Stacks tip changed prior to commit: {cur_stacks_tip_ch}/{cur_stacks_tip_bh} != {tip_block_ch}/{tip_block_bh}" ); return Err(NakamotoNodeError::StacksTipChanged); } @@ -1050,16 +1055,12 @@ &StacksBlockId::new(&tip_block_ch, &tip_block_bh), ) .map_err(|e| { - warn!( - "Relayer: failed to load tip {}/{}: {:?}", - &tip_block_ch, &tip_block_bh, &e - ); + warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}"); NakamotoNodeError::ParentNotFound })?
.map(|header| header.stacks_block_height) else { warn!( - "Relayer: failed to load height for tip {}/{} (got None)", - &tip_block_ch, &tip_block_bh + "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)" ); return Err(NakamotoNodeError::ParentNotFound); }; @@ -1067,7 +1068,7 @@ impl RelayerThread { // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let res = self.bitcoin_controller.submit_operation( - last_committed.get_epoch_id().clone(), + *last_committed.get_epoch_id(), BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), &mut op_signer, 1, @@ -1131,7 +1132,7 @@ impl RelayerThread { // load up canonical sortition and stacks tips let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical sortition tip: {:?}", &e); + error!("Failed to load canonical sortition tip: {e:?}"); e }) else { @@ -1141,7 +1142,7 @@ impl RelayerThread { // NOTE: this may be an epoch2x tip let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical stacks tip: {:?}", &e); + error!("Failed to load canonical stacks tip: {e:?}"); e }) else { @@ -1179,6 +1180,32 @@ impl RelayerThread { return None; } + if !highest_tenure_changed { + debug!("Relayer: burnchain view changed, but highest tenure did not"); + // The burnchain view changed, but the highest tenure did not, so + // wait a bit for the first block in the new tenure to arrive. This + // is to avoid submitting a block commit that will be immediately + // RBFed when the first block arrives. + if let Some(new_tenure_timeout) = self.new_tenure_timeout { + debug!( + "Relayer: {}s elapsed since burn block arrival", + new_tenure_timeout.elapsed().as_secs(), + ); + if new_tenure_timeout.elapsed() < self.config.miner.block_commit_delay { + return None; + } + } else { + info!( + "Relayer: starting new tenure timeout for {}s", + self.config.miner.block_commit_delay.as_secs() + ); + let timeout = Instant::now() + self.config.miner.block_commit_delay; + self.new_tenure_timeout = Some(Instant::now()); + self.next_initiative = timeout; + return None; + } + } + // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit Some(RelayerDirective::IssueBlockCommit( stacks_tip_ch, @@ -1198,7 +1225,7 @@ impl RelayerThread { while self.globals.keep_running() { let raised_initiative = self.globals.take_initiative(); let timed_out = Instant::now() >= self.next_initiative; - let directive = if raised_initiative.is_some() || timed_out { + let mut initiative_directive = if raised_initiative.is_some() || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1206,13 +1233,17 @@ impl RelayerThread { None }; - let directive = if let Some(directive) = directive { + let directive = if let Some(directive) = initiative_directive.take() { directive } else { + // channel was drained, so do a time-bound recv match relay_rcv.recv_timeout(Duration::from_millis( self.config.node.next_initiative_delay, )) { - Ok(directive) => directive, + Ok(directive) => { + // only do this once, so we can call .initiative() again + directive + } Err(RecvTimeoutError::Timeout) => { continue; } @@ -1224,7 +1255,7 @@ impl RelayerThread { debug!("Relayer: main loop directive"; "directive" => %directive, - "raised_initiative" => 
%raised_initiative.unwrap_or("relay_rcv".to_string()), + "raised_initiative" => ?raised_initiative, "timed_out" => %timed_out); if !self.handle_directive(directive) { @@ -1246,25 +1277,19 @@ let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; @@ -1274,7 +1299,7 @@ return None; } - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } @@ -1299,7 +1324,7 @@ let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { saved_key_opt = - Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); + Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh()); } if let Some(saved_key) = saved_key_opt { debug!("Relayer: resuming VRF key"); @@ -1371,9 +1396,9 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/does_not_exist.json"; - _ = std::fs::remove_file(&path); + _ = std::fs::remove_file(path); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); } @@ -1384,13 +1409,13 @@ let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/empty.json"; - File::create(&path).expect("Failed to create test file"); - assert!(Path::new(&path).exists()); + File::create(path).expect("Failed to create test file"); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1403,15 +1428,15 @@ let json_content = r#"{ "hello": "world" }"#; // Write the JSON content to the file - let mut file = File::create(&path).expect("Failed to create test file"); + let mut file = File::create(path).expect("Failed to create test file"); file.write_all(json_content.as_bytes()) .expect("Failed to write to test file"); - assert!(Path::new(&path).exists()); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1432,10 +1457,10 @@ let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_some()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete
test file"); } #[test] @@ -1460,9 +1485,9 @@ pub mod test { let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); let pubkey_hash = Hash160::from_node_public_key(&pk); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 697dddeb03..14eeef20b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -91,7 +91,7 @@ impl SignCoordinator { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {:?}", &reward_set); + debug!("reward set: {reward_set:?}"); return Err(ChainstateError::NoRegisteredSigners(0)); }; @@ -188,6 +188,7 @@ impl SignCoordinator { } /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] pub fn send_miners_message( miner_sk: &StacksPrivateKey, sortdb: &SortitionDB, @@ -199,7 +200,7 @@ impl SignCoordinator { miners_session: &mut StackerDBSession, election_sortition: &ConsensusHash, ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -222,7 +223,7 @@ impl SignCoordinator { .saturating_add(1); let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); chunk - .sign(&miner_sk) + .sign(miner_sk) .map_err(|_| "Failed to sign StackerDB chunk")?; match miners_session.put_chunk(&chunk) { @@ -270,13 +271,14 @@ impl SignCoordinator { /// to the signers, and then waits for the signers to respond /// with their signatures. It does so in two ways, concurrently: /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. + /// found, then the block can be broadcast. /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. // Mutants skip here: this function is covered via integration tests, // which the mutation testing does not see. 
#[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] pub fn run_sign_v0( &mut self, block: &NakamotoBlock, @@ -306,7 +308,7 @@ impl SignCoordinator { &self.message_key, sortdb, burn_tip, - &stackerdbs, + stackerdbs, block_proposal_message, MinerSlotID::BlockProposal, self.is_mainnet, @@ -355,9 +357,8 @@ impl SignCoordinator { .get_nakamoto_block(&block.block_id()) .map_err(|e| { warn!( - "Failed to query chainstate for block {}: {:?}", - &block.block_id(), - &e + "Failed to query chainstate for block {}: {e:?}", + &block.block_id() ); e }) @@ -367,7 +368,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip) { + if Self::check_burn_tip_changed(sortdb, burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } @@ -549,8 +550,7 @@ impl SignCoordinator { }; responded_signers.insert(rejected_pubkey); debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "Signer {slot_id} rejected our block {}/{}", &block.header.consensus_hash, &block.header.block_hash() ); @@ -562,8 +562,7 @@ impl SignCoordinator { > self.total_weight { debug!( - "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, + "{total_reject_weight}/{} signers vote to reject our block {}/{}", self.total_weight, &block.header.consensus_hash, &block.header.block_hash() diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b..63315557a1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -62,79 +62,85 @@ /// [11] Notifies about new transaction attachment events /// [12] Signals VRF key registration /// -/// When the node is running, there are 4-5 active threads at once. They are: +/// When the node is running, there are 4-5 active threads at once. They are: /// -/// * **RunLoop Thread**: This is the main thread, whose code body lives in src/run_loop/neon.rs. -/// This thread is responsible for: -/// * Bootup -/// * Running the burnchain indexer -/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process +/// * **RunLoop Thread**: +/// This is the main thread, whose code body lives in `src/run_loop/neon.rs`. +/// This thread is responsible for: +/// * Bootup +/// * Running the burnchain indexer +/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process /// -/// * **Relayer Thread**: This is the thread that stores and relays blocks and microblocks. Both -/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to -/// ensure that neither one attempts to acquire a write-lock in the underlying databases. -/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks -/// blocks, and it directs the miner thread (if running) to stop when either it or the -/// ChainsCoordinator thread needs to acquire the write-lock. 
-/// This thread is responsible for: -/// * Receiving new blocks and microblocks from the P2P thread via a shared channel -/// * (Sychronously) requesting the CoordinatorThread to process newly-stored Stacks blocks and -/// microblocks -/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P -/// thread so it can answer queries about the unconfirmed microblock chain -/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast -/// * Registering the VRF public key for the miner -/// * Spawning the block and microblock miner threads, and stopping them if their continued -/// execution would inhibit block or microblock storage or processing. -/// * Submitting the burnchain operation to commit to a freshly-mined block +/// * **Relayer Thread**: +/// This is the thread that stores and relays blocks and microblocks. Both +/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to +/// ensure that neither one attempts to acquire a write-lock in the underlying databases. +/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks +/// blocks, and it directs the miner thread (if running) to stop when either it or the +/// ChainsCoordinator thread needs to acquire the write-lock. +/// This thread is responsible for: +/// * Receiving new blocks and microblocks from the P2P thread via a shared channel +/// * (Synchronously) requesting the CoordinatorThread to process newly-stored Stacks blocks +/// and microblocks +/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P +/// thread so it can answer queries about the unconfirmed microblock chain +/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast +/// * Registering the VRF public key for the miner +/// * Spawning the block and microblock miner threads, and stopping them if their continued +/// execution would inhibit block or microblock storage or processing. +/// * Submitting the burnchain operation to commit to a freshly-mined block /// -/// * **Miner thread**: This is the thread that actually produces new blocks and microblocks. It -/// is spawned only by the Relayer thread to carry out mining activity when the underlying -/// chainstate is not needed by either the Relayer or ChainsCoordinator threeads. -/// This thread does the following: -/// * Walk the mempool DB to build a new block or microblock -/// * Return the block or microblock to the Relayer thread +/// * **Miner Thread**: +/// This is the thread that actually produces new blocks and microblocks. It +/// is spawned only by the Relayer thread to carry out mining activity when the underlying +/// chainstate is not needed by either the Relayer or ChainsCoordinator threads. +/// This thread does the following: +/// * Walk the mempool DB to build a new block or microblock +/// * Return the block or microblock to the Relayer thread /// -/// * **P2P Thread**: This is the thread that communicates with the rest of the p2p network, and -/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock -/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards -/// data it receives from the p2p thread to the Relayer thread for I/O-bound processing. 
At the -/// time of this writing, it still requires holding a write-lock to handle some RPC request, but -/// future work will remove this so that this thread's execution will not interfere with the -/// others. This is the only thread that does socket I/O. -/// This thread runs the PeerNetwork state machines, which include the following: -/// * Learning the node's public IP address -/// * Discovering neighbor nodes -/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread to -/// other neighbors -/// * Synchronizing block and microblock inventory state with other neighbors -/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and processing -/// * Downloading transaction attachments as their hashes are discovered during block processing -/// * Synchronizing the local mempool database with other neighbors -/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) -/// * Handling HTTP requests +/// * **P2P Thread**: +/// This is the thread that communicates with the rest of the P2P network, and +/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock +/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards +/// data it receives from the P2P thread to the Relayer thread for I/O-bound processing. At the +/// time of this writing, it still requires holding a write-lock to handle some RPC requests, but +/// future work will remove this so that this thread's execution will not interfere with the +/// others. This is the only thread that does socket I/O. +/// This thread runs the PeerNetwork state machines, which include the following: +/// * Learning the node's public IP address +/// * Discovering neighbor nodes +/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread +/// to other neighbors +/// * Synchronizing block and microblock inventory state with other neighbors +/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and +/// processing +/// * Downloading transaction attachments as their hashes are discovered during block processing +/// * Synchronizing the local mempool database with other neighbors +/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) +/// * Handling HTTP requests /// -/// * **ChainsCoordinator Thread**: This thread process sortitions and Stacks blocks and -/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). It, -/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the -/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in -/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former -/// drives Stacks blocks processing, the latter sortitions). -/// This thread is responsible for: -/// * Responding to requests from other threads to process sortitions -/// * Responding to requests from other threads to process Stacks blocks and microblocks -/// * Processing PoX chain reorgs, should they ever happen -/// * Detecting attachment creation events, and informing the P2P thread of them so it can go -/// and download them +/// * **ChainsCoordinator Thread**: +/// This thread processes sortitions and Stacks blocks and +/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). 
It, +/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the +/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in +/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former +/// drives Stacks blocks processing, the latter sortitions). +/// This thread is responsible for: +/// * Responding to requests from other threads to process sortitions +/// * Responding to requests from other threads to process Stacks blocks and microblocks +/// * Processing PoX chain reorgs, should they ever happen +/// * Detecting attachment creation events, and informing the P2P thread of them so it can go +/// and download them /// /// In addition to the mempool and chainstate databases, these threads share access to a Globals -/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant -/// to store inter-thread shared singleton communication media all in one convenient struct. Each -/// thread has a handle to the struct's shared state handles. Global state includes: -/// * The global flag as to whether or not the miner thread can be running -/// * The global shutdown flag that, when set, causes all threads to terminate -/// * Sender channel endpoints that can be shared between threads -/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) +/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant +/// to store inter-thread shared singleton communication media all in one convenient struct. Each +/// thread has a handle to the struct's shared state handles. Global state includes: +/// * The global flag as to whether or not the miner thread can be running +/// * The global shutdown flag that, when set, causes all threads to terminate +/// * Sender channel endpoints that can be shared between threads +/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) /// /// This file may be refactored in the future into a full-fledged module. use std::cmp; @@ -230,6 +236,7 @@ pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB type MinedBlocks = HashMap; /// Result of running the miner thread. It could produce a Stacks block or a microblock. 
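The Globals description above amounts to a cloneable bundle of `Arc`-wrapped flags and shared channel endpoints that every thread carries a handle to. A minimal sketch of that pattern, with illustrative field names rather than the actual `Globals` layout:

```rust
// Shared soft state between node threads: each thread clones the struct,
// which only clones the cheap Arc/Sender handles. Field names are
// illustrative, not the real Globals definition.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Sender};
use std::sync::Arc;

#[derive(Clone)]
struct Globals {
    /// When set, every thread should wind down its main loop.
    should_shutdown: Arc<AtomicBool>,
    /// Whether the miner thread is allowed to run right now.
    miner_enabled: Arc<AtomicBool>,
    /// Shared endpoint for sending directives to the relayer thread.
    relay_send: Sender<String>,
}

impl Globals {
    fn keep_running(&self) -> bool {
        !self.should_shutdown.load(Ordering::SeqCst)
    }
}

fn main() {
    let (tx, rx) = channel();
    let globals = Globals {
        should_shutdown: Arc::new(AtomicBool::new(false)),
        miner_enabled: Arc::new(AtomicBool::new(true)),
        relay_send: tx,
    };
    // Each thread gets its own cheap clone of the shared handles.
    let worker_view = globals.clone();
    assert!(worker_view.keep_running());
    assert!(worker_view.miner_enabled.load(Ordering::SeqCst));
    worker_view.relay_send.send("directive".to_string()).unwrap();
    globals.should_shutdown.store(true, Ordering::SeqCst);
    assert_eq!(rx.recv().unwrap(), "directive");
}
```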
+#[allow(clippy::large_enum_variant)] pub(crate) enum MinerThreadResult { Block( AssembledAnchorBlock, @@ -303,10 +310,7 @@ pub(crate) fn fault_injection_long_tenure() { error!("Parse error for STX_TEST_SLOW_TENURE"); panic!(); }; - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); + info!("Fault injection: sleeping for {tenure_time} milliseconds to simulate a long tenure"); stacks_common::util::sleep_ms(tenure_time); } @@ -571,10 +575,7 @@ impl MicroblockMinerThread { // This is an artifact of the way the MARF is built (see #1449) let sortdb = SortitionDB::open(&burn_db_path, true, burnchain.pox_constants) .map_err(|e| { - error!( - "Relayer: Could not open sortdb '{}' ({:?}); skipping tenure", - &burn_db_path, &e - ); + error!("Relayer: Could not open sortdb '{burn_db_path}' ({e:?}); skipping tenure"); e }) .ok()?; @@ -582,8 +583,7 @@ impl MicroblockMinerThread { let mut chainstate = open_chainstate_with_faults(&config) .map_err(|e| { error!( - "Relayer: Could not open chainstate '{}' ({:?}); skipping microblock tenure", - &stacks_chainstate_path, &e + "Relayer: Could not open chainstate '{stacks_chainstate_path}' ({e:?}); skipping microblock tenure" ); e }) @@ -605,10 +605,7 @@ impl MicroblockMinerThread { .. } = miner_tip; - debug!( - "Relayer: Instantiate microblock mining state off of {}/{}", - &ch, &bhh - ); + debug!("Relayer: Instantiate microblock mining state off of {ch}/{bhh}"); // we won a block! proceed to build a microblock tail if we've stored it match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { @@ -646,8 +643,8 @@ impl MicroblockMinerThread { sortdb: Some(sortdb), mempool: Some(mempool), event_dispatcher: relayer_thread.event_dispatcher.clone(), - parent_consensus_hash: ch.clone(), - parent_block_hash: bhh.clone(), + parent_consensus_hash: ch, + parent_block_hash: bhh, miner_key, frequency, last_mined: 0, @@ -657,17 +654,11 @@ impl MicroblockMinerThread { }) } Ok(None) => { - warn!( - "Relayer: No such anchored block: {}/{}. Cannot mine microblocks", - ch, bhh - ); + warn!("Relayer: No such anchored block: {ch}/{bhh}. Cannot mine microblocks"); None } Err(e) => { - warn!( - "Relayer: Failed to get anchored block cost for {}/{}: {:?}", - ch, bhh, &e - ); + warn!("Relayer: Failed to get anchored block cost for {ch}/{bhh}: {e:?}"); None } } @@ -719,7 +710,7 @@ impl MicroblockMinerThread { let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? .ok_or_else(|| { @@ -729,13 +720,13 @@ impl MicroblockMinerThread { let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {}", e); + error!("Failed to get AST rules for microblock: {e}"); e })?; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), burn_height) .map_err(|e| { - error!("Failed to get epoch for microblock: {}", e); + error!("Failed to get epoch for microblock: {e}"); e })? 
.expect("FATAL: no epoch defined") @@ -743,7 +734,7 @@ impl MicroblockMinerThread { let mint_result = { let ic = sortdb.index_handle_at_block( - &chainstate, + chainstate, &block_snapshot.get_canonical_stacks_block_id(), )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( @@ -755,10 +746,10 @@ impl MicroblockMinerThread { Ok(x) => x, Err(e) => { let msg = format!( - "Failed to create a microblock miner at chaintip {}/{}: {:?}", - &self.parent_consensus_hash, &self.parent_block_hash, &e + "Failed to create a microblock miner at chaintip {}/{}: {e:?}", + &self.parent_consensus_hash, &self.parent_block_hash ); - error!("{}", msg); + error!("{msg}"); return Err(e); } }; @@ -787,7 +778,7 @@ impl MicroblockMinerThread { let (mined_microblock, new_cost) = match mint_result { Ok(x) => x, Err(e) => { - warn!("Failed to mine microblock: {}", e); + warn!("Failed to mine microblock: {e}"); return Err(e); } }; @@ -810,25 +801,25 @@ impl MicroblockMinerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this microblock somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let mblock_bits = mined_microblock.serialize_to_vec(); let mblock_bits_hex = to_hex(&mblock_bits); let mblock_json = format!( - r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, - &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash + r#"{{"microblock":"{mblock_bits_hex}","parent_consensus":"{}","parent_block":"{}"}}"#, + &self.parent_consensus_hash, &self.parent_block_hash ); - file.write_all(&mblock_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write microblock bits to '{:?}'", &path) + file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { + panic!("FATAL: failed to write microblock bits to '{path:?}'") }); info!( "Fault injection: bad microblock {} saved to {}", @@ -873,7 +864,7 @@ impl MicroblockMinerThread { // update unconfirmed state cost self.cost_so_far = new_cost; self.quantity += 1; - return Ok(mined_microblock); + Ok(mined_microblock) } /// Can this microblock miner mine off of this given tip? @@ -926,11 +917,11 @@ impl MicroblockMinerThread { info!("Will keep polling mempool for transactions to include in a microblock"); } Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); + warn!("Failed to mine one microblock: {e:?}"); } } } else { - debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); + debug!("Will not mine microblocks yet -- have {num_attachable} attachable blocks that arrived in the last 10 minutes"); } self.last_mined = get_epoch_time_ms(); @@ -1086,6 +1077,7 @@ impl BlockMinerThread { } /// Constructs and returns a LeaderBlockCommitOp out of the provided params. 
+ #[allow(clippy::too_many_arguments)] fn inner_generate_block_commit_op( &self, block_header_hash: BlockHeaderHash, @@ -1202,7 +1194,7 @@ impl BlockMinerThread { .expect("FATAL: could not query chain tips") }; - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1213,7 +1205,7 @@ impl BlockMinerThread { .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1269,7 +1261,7 @@ impl BlockMinerThread { pub(crate) fn sort_and_populate_candidates( mut candidates: Vec, ) -> Vec { - if candidates.len() == 0 { + if candidates.is_empty() { return candidates; } candidates.sort_by(|tip1, tip2| { @@ -1373,7 +1365,7 @@ impl BlockMinerThread { // identify leaf tips -- i.e. blocks with no children let parent_consensus_hashes: HashSet<_> = stacks_tips .iter() - .map(|x| x.parent_consensus_hash.clone()) + .map(|x| x.parent_consensus_hash) .collect(); let mut leaf_tips: Vec<_> = stacks_tips @@ -1381,7 +1373,7 @@ impl BlockMinerThread { .filter(|x| !parent_consensus_hashes.contains(&x.consensus_hash)) .collect(); - if leaf_tips.len() == 0 { + if leaf_tips.is_empty() { return None; } @@ -1427,8 +1419,7 @@ impl BlockMinerThread { { // This leaf does not confirm a previous-best-tip, so assign it the // worst-possible score. - info!("Tip #{} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", - i, + info!("Tip #{i} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, @@ -1488,13 +1479,11 @@ impl BlockMinerThread { } info!( - "Tip #{} {}/{} at {}:{} has score {} ({})", - i, + "Tip #{i} {}/{} at {}:{} has score {score} ({})", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, leaf_tip.stacks_height, - score, score_summaries.join(" + ").to_string() ); if score < u64::MAX { @@ -1502,7 +1491,7 @@ impl BlockMinerThread { } } - if scores.len() == 0 { + if scores.is_empty() { // revert to prior tie-breaking scheme return None; } @@ -1519,8 +1508,8 @@ impl BlockMinerThread { .expect("FATAL: candidates should not be empty"); info!( - "Best tip is #{} {}/{}", - best_tip_idx, &best_tip.consensus_hash, &best_tip.anchored_block_hash + "Best tip is #{best_tip_idx} {}/{}", + &best_tip.consensus_hash, &best_tip.anchored_block_hash ); Some((*best_tip).clone()) } @@ -1576,14 +1565,14 @@ impl BlockMinerThread { let chain_tip = ChainTip::genesis( &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), + burnchain_params.first_block_height, burnchain_params.first_block_timestamp.into(), ); ( Some(ParentStacksBlockInfo { stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, parent_block_burn_height: 0, parent_block_total_burn: 0, parent_winning_vtxindex: 0, @@ -1671,7 +1660,7 @@ impl BlockMinerThread { { if (prev_block.anchored_block.header.parent_microblock == BlockHeaderHash([0u8; 32]) - && stream.len() == 0) + && stream.is_empty()) || (prev_block.anchored_block.header.parent_microblock != BlockHeaderHash([0u8; 32]) && stream.len() @@ -1682,9 +1671,9 @@ impl BlockMinerThread { if !force { // the chain tip hasn't changed since we attempted to build a block. Use what we // already have. 
- info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1693,36 +1682,32 @@ impl BlockMinerThread { // TODO: only consider rebuilding our anchored block if we (a) have // time, and (b) the new microblocks are worth more than the new BTC // fee minus the old BTC fee - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } - } else { - if !force { - // no microblock stream to confirm, and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + } else if !force { + // no microblock stream to confirm, and the stacks tip hasn't changed + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.burn_hash); - return None; - } + return None; } + } else if self.burn_block.burn_header_hash == prev_block.burn_hash { + // only try and re-mine if there was no sortition since the last chain tip + info!("Relayer: Stacks tip has changed to {parent_consensus_hash}/{} since we last tried to mine a block in {} at burn height {parent_block_burn_height}; attempt was {} (for Stacks tip {}/{})", + stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the 
chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. + max_txs = 0; } else { - if self.burn_block.burn_header_hash == prev_block.burn_hash { - // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - // Since the chain tip has changed, we should try to mine a new block, even - // if it has less transactions than the previous block we mined, since that - // previous block would now be a reorg. - max_txs = 0; - } else { - info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); - } + info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1818,9 +1803,7 @@ impl BlockMinerThread { Ok(x) => { let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); debug!( - "Loaded {} microblocks descending from {}/{} (data: {})", - num_mblocks, - parent_consensus_hash, + "Loaded {num_mblocks} microblocks descending from {parent_consensus_hash}/{} (data: {})", &stacks_parent_header.anchored_header.block_hash(), x.is_some() ); @@ -1828,17 +1811,15 @@ impl BlockMinerThread { } Err(e) => { warn!( - "Failed to load descendant microblock stream from {}/{}: {:?}", - parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &e + "Failed to load descendant microblock stream from {parent_consensus_hash}/{}: {e:?}", + &stacks_parent_header.anchored_header.block_hash() ); None } }; if let Some((ref microblocks, ref poison_opt)) = µblock_info_opt { - if let Some(ref tail) = microblocks.last() { + if let Some(tail) = microblocks.last() { debug!( "Confirm microblock stream tailed at {} (seq {})", &tail.block_hash(), @@ -1848,11 +1829,10 @@ impl BlockMinerThread { // try and confirm as many microblocks as we can (but note that the stream itself may // be too long; we'll try again if that happens). 
- stacks_parent_header.microblock_tail = - microblocks.last().clone().map(|blk| blk.header.clone()); + stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { - debug!("Detected poisoned microblock fork: {:?}", &poison_payload); + debug!("Detected poisoned microblock fork: {poison_payload:?}"); // submit it multiple times with different nonces, so it'll have a good chance of // eventually getting picked up (even if the miner sends other transactions from @@ -1868,21 +1848,15 @@ impl BlockMinerThread { if let Err(e) = mem_pool.miner_submit( chain_state, sortdb, - &parent_consensus_hash, + parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, Some(&self.event_dispatcher), 1_000_000_000.0, // prioritize this for inclusion ) { - warn!( - "Detected but failed to mine poison-microblock transaction: {:?}", - &e - ); + warn!("Detected but failed to mine poison-microblock transaction: {e:?}"); } else { - debug!( - "Submit poison-microblock transaction {:?}", - &poison_microblock_tx - ); + debug!("Submit poison-microblock transaction {poison_microblock_tx:?}"); } } } @@ -1915,11 +1889,12 @@ impl BlockMinerThread { } btc_addrs .into_iter() - .map(|addr| format!("{}", &addr)) + .map(|addr| format!("{addr}")) .collect() } /// Obtain the target burn fee cap, when considering how well this miner is performing. + #[allow(clippy::too_many_arguments)] pub fn get_mining_spend_amount( config: &Config, keychain: &Keychain, @@ -1947,7 +1922,7 @@ impl BlockMinerThread { }; let Ok(tip) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { - warn!("Failed to load canonical burn chain tip: {:?}", &e); + warn!("Failed to load canonical burn chain tip: {e:?}"); e }) else { return config_file_burn_fee_cap; @@ -1955,10 +1930,7 @@ impl BlockMinerThread { let tip = if let Some(at_burn_block) = at_burn_block.as_ref() { let ih = sortdb.index_handle(&tip.sortition_id); let Ok(Some(ancestor_tip)) = ih.get_block_snapshot_by_height(*at_burn_block) else { - warn!( - "Failed to load ancestor tip at burn height {}", - at_burn_block - ); + warn!("Failed to load ancestor tip at burn height {at_burn_block}"); return config_file_burn_fee_cap; }; ancestor_tip @@ -1968,13 +1940,13 @@ impl BlockMinerThread { let Ok(active_miners_and_commits) = MinerStats::get_active_miners(sortdb, at_burn_block) .map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); + warn!("Failed to get active miners: {e:?}"); e }) else { return config_file_burn_fee_cap; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return config_file_burn_fee_cap; } @@ -1984,12 +1956,12 @@ impl BlockMinerThread { .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(tip.block_height + 1, &active_miners) .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); + warn!("Failed to find unconfirmed block-commits: {e}"); e }) else { @@ -2001,24 +1973,20 @@ impl BlockMinerThread { .map(|cmt| (cmt.apparent_sender.to_string(), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = 
MinerStats::get_spend_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, + recipients, ); let win_probs = if miner_config.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, - ); - win_probs + recipients, + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -2031,21 +1999,20 @@ impl BlockMinerThread { at_burn_block, ) .map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + warn!("Failed to get unconfirmed burn distribution: {e:?}"); e }) else { return config_file_burn_fee_cap; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - miner_config.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + miner_config.fast_rampup ); let miner_addrs = Self::get_miner_addrs(config, keychain); @@ -2056,8 +2023,8 @@ impl BlockMinerThread { .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); set_prior_winning_prob(tip.block_height, win_prob); @@ -2080,8 +2047,7 @@ impl BlockMinerThread { let prior_win_prob = get_prior_winning_prob(prior_burn_height); if prior_win_prob < config.miner.target_win_probability { info!( - "Miner underperformed in block {} ({}/{})", - prior_burn_height, underperformed_count, underperform_stop_threshold + "Miner underperformed in block {prior_burn_height} ({underperformed_count}/{underperform_stop_threshold})" ); underperformed_count += 1; } @@ -2102,6 +2068,7 @@ impl BlockMinerThread { /// Produce the block-commit for this anchored block, if we can. /// Returns the op on success /// Returns None if we fail somehow. 
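The `get_mining_spend_amount` logic above weighs this miner's unconfirmed burn spend against everyone else's to estimate a win probability, then compares it with `miner.target_win_probability` to detect underperformance. The sketch below illustrates only the core proportional-share idea; `MinerStats` computes the real spend and win distributions:

```rust
// Naive win-probability estimate: all else equal, a miner's chance of
// winning the next sortition is roughly its share of the total burn spend.
// This is a simplification of MinerStats' actual distribution logic.
fn win_probability(spends: &[(&str, u64)], miner: &str) -> f64 {
    let total: u64 = spends.iter().map(|(_, amt)| *amt).sum();
    if total == 0 {
        return 0.0;
    }
    let mine: u64 = spends
        .iter()
        .filter(|(who, _)| *who == miner)
        .map(|(_, amt)| *amt)
        .sum();
    mine as f64 / total as f64
}

fn main() {
    // Unconfirmed block-commits observed for the next block, in sats.
    let spends = [("us", 20_000u64), ("rival_a", 30_000), ("rival_b", 50_000)];
    let p = win_probability(&spends, "us");
    println!("win probability: {p:.2}"); // 0.20
    // If p stays below the configured target for long enough, the node
    // treats the miner as underperforming.
    let target = 0.25;
    assert!(p < target);
}
```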
+ #[allow(clippy::too_many_arguments)] pub fn make_block_commit( &self, burn_db: &mut SortitionDB, @@ -2123,7 +2090,7 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); return None; } }; @@ -2227,12 +2194,10 @@ impl BlockMinerThread { if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { if stacks_tip.anchored_header.height() + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= highest_unprocessed.height + > highest_unprocessed.height && highest_unprocessed_block_sn.block_height + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= sort_tip.block_height + > sort_tip.block_height { // we're close enough to the chain tip that it's a bad idea for us to mine // -- we'll likely create an orphan @@ -2243,7 +2208,7 @@ impl BlockMinerThread { } } // we can mine - return false; + false } /// Only used in mock signing to generate a peer info view @@ -2301,16 +2266,14 @@ impl BlockMinerThread { // Just wait a min amount of time for the mock signatures to come in while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + for chunk in chunks.into_iter().flatten() { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) { - if mock_signature.mock_proposal == *mock_proposal - && !mock_signatures.contains(&mock_signature) - { - mock_signatures.push(mock_signature); - } + mock_signatures.push(mock_signature); } } } @@ -2325,19 +2288,17 @@ impl BlockMinerThread { StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { - for message in messages { - if let Some(message) = message { - if message.is_empty() { - continue; - } - let Ok(SignerMessage::MockBlock(mock_block)) = - SignerMessage::consensus_deserialize(&mut message.as_slice()) - else { - continue; - }; - if mock_block.mock_proposal.peer_info == *peer_info { - return true; - } + for message in messages.into_iter().flatten() { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockBlock(mock_block)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } @@ -2536,10 +2497,7 @@ impl BlockMinerThread { if cfg!(test) { if let Ok(mblock_pubkey_hash_str) = std::env::var("STACKS_MICROBLOCK_PUBKEY_HASH") { if let Ok(bad_pubkh) = Hash160::from_hex(&mblock_pubkey_hash_str) { - debug!( - "Fault injection: set microblock public key hash to {}", - &bad_pubkh - ); + debug!("Fault injection: set microblock public key hash to {bad_pubkh}"); pubkh = bad_pubkh } } @@ -2624,13 +2582,13 @@ impl BlockMinerThread { ) { Ok(block) => block, Err(e) => { - error!("Relayer: Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + error!("Relayer: Failure mining anchor block even after removing offending microblock {mblock_header_hash}: 
{e}"); return None; } } } Err(e) => { - error!("Relayer: Failure mining anchored block: {}", e); + error!("Relayer: Failure mining anchored block: {e}"); return None; } }; @@ -2649,12 +2607,12 @@ impl BlockMinerThread { if miner_config.only_increase_tx_count && max_txs > u64::try_from(anchored_block.txs.len()).expect("too many txs") { - info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {} txs", anchored_block.txs.len(), max_txs); + info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {max_txs} txs", anchored_block.txs.len()); return None; } info!( - "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", + "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {attempt}", if parent_block_info.parent_block_total_burn == 0 { "Genesis" } else { @@ -2662,8 +2620,7 @@ impl BlockMinerThread { }, anchored_block.header.total_work.work, anchored_block.block_hash(), - anchored_block.txs.len(), - attempt + anchored_block.txs.len() ); // let's commit @@ -2780,7 +2737,7 @@ impl BlockMinerThread { return None; } Err(e) => { - warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + warn!("Relayer: Failed to submit Bitcoin transaction: {e:?}"); self.failed_to_submit_last_attempt = true; return None; } @@ -2941,9 +2898,7 @@ impl RelayerThread { /// chain tip? pub fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all @@ -3022,7 +2977,7 @@ impl RelayerThread { net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + let canonical_tip = unconfirmed_state.confirmed_chain_tip; self.event_dispatcher.process_new_microblocks( canonical_tip, net_receipts.processed_unconfirmed_state, @@ -3076,7 +3031,7 @@ impl RelayerThread { let burn_height = SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? .ok_or_else(|| { @@ -3094,7 +3049,7 @@ impl RelayerThread { if !Relayer::static_check_problematic_relayed_block( self.chainstate_ref().mainnet, epoch_id, - &anchored_block, + anchored_block, ASTRules::PrecheckSize, ) { // nope! 
@@ -3107,24 +3062,22 @@ impl RelayerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this block somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let block_bits = anchored_block.serialize_to_vec(); let block_bits_hex = to_hex(&block_bits); - let block_json = format!( - r#"{{"block":"{}","consensus":"{}"}}"#, - &block_bits_hex, &consensus_hash - ); - file.write_all(&block_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write block bits to '{:?}'", &path) + let block_json = + format!(r#"{{"block":"{block_bits_hex}","consensus":"{consensus_hash}"}}"#); + file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { + panic!("FATAL: failed to write block bits to '{path:?}'") }); info!( "Fault injection: bad block {} saved to {}", @@ -3154,8 +3107,8 @@ impl RelayerThread { chainstate.preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, - &parent_consensus_hash, + anchored_block, + parent_consensus_hash, 0, ) })?; @@ -3234,8 +3187,8 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); debug!( - "Relayer: Process tenure {}/{} in {} burn height {}", - &consensus_hash, &block_header_hash, &burn_hash, sn.block_height + "Relayer: Process tenure {consensus_hash}/{block_header_hash} in {burn_hash} burn height {}", + sn.block_height ); if let Some((last_mined_block_data, microblock_privkey)) = @@ -3252,8 +3205,7 @@ impl RelayerThread { let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; info!( - "Relayer: Won sortition! Mining reward will be received in {} blocks (block #{})", - MINER_REWARD_MATURITY, reward_block_height + "Relayer: Won sortition! 
Mining reward will be received in {MINER_REWARD_MATURITY} blocks (block #{reward_block_height})" ); debug!("Relayer: Won sortition!"; "stacks_header" => %block_header_hash, @@ -3272,7 +3224,7 @@ impl RelayerThread { return (false, None); } Err(e) => { - warn!("Error processing my tenure, bad block produced: {}", e); + warn!("Error processing my tenure, bad block produced: {e}"); warn!( "Bad block"; "stacks_header" => %block_header_hash, @@ -3283,20 +3235,18 @@ impl RelayerThread { }; // advertize _and_ push blocks for now - let blocks_available = Relayer::load_blocks_available_data( - self.sortdb_ref(), - vec![consensus_hash.clone()], - ) - .expect("Failed to obtain block information for a block we mined."); + let blocks_available = + Relayer::load_blocks_available_data(self.sortdb_ref(), vec![consensus_hash]) + .expect("Failed to obtain block information for a block we mined."); let block_data = { let mut bd = HashMap::new(); - bd.insert(consensus_hash.clone(), mined_block.clone()); + bd.insert(consensus_hash, mined_block.clone()); bd }; if let Err(e) = self.relayer.advertize_blocks(blocks_available, block_data) { - warn!("Failed to advertise new block: {}", e); + warn!("Failed to advertise new block: {e}"); } let snapshot = SortitionDB::get_block_snapshot_consensus( @@ -3308,13 +3258,12 @@ impl RelayerThread { if !snapshot.pox_valid { warn!( - "Snapshot for {} is no longer valid; discarding {}...", - &consensus_hash, + "Snapshot for {consensus_hash} is no longer valid; discarding {}...", &mined_block.block_hash() ); miner_tip = Self::pick_higher_tip(miner_tip, None); } else { - let ch = snapshot.consensus_hash.clone(); + let ch = snapshot.consensus_hash; let bh = mined_block.block_hash(); let height = mined_block.header.total_work.work; @@ -3332,7 +3281,7 @@ impl RelayerThread { .relayer .broadcast_block(snapshot.consensus_hash, mined_block) { - warn!("Failed to push new block: {}", e); + warn!("Failed to push new block: {e}"); } } @@ -3355,8 +3304,7 @@ impl RelayerThread { } } else { debug!( - "Relayer: Did not win sortition in {}, winning block was {}/{}", - &burn_hash, &consensus_hash, &block_header_hash + "Relayer: Did not win sortition in {burn_hash}, winning block was {consensus_hash}/{block_header_hash}" ); miner_tip = None; } @@ -3391,7 +3339,7 @@ impl RelayerThread { let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { let mut tenures = vec![]; let last_sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &last_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), last_ch) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown prior consensus hash"); @@ -3470,8 +3418,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB") .expect("FATAL: no snapshot for consensus hash"); - let old_last_mined_blocks = - mem::replace(&mut self.last_mined_blocks, MinedBlocks::new()); + let old_last_mined_blocks = mem::take(&mut self.last_mined_blocks); self.last_mined_blocks = Self::clear_stale_mined_blocks(this_burn_tip.block_height, old_last_mined_blocks); @@ -3492,11 +3439,9 @@ impl RelayerThread { || mtip.block_hash != stacks_tip_block_hash { debug!( - "Relayer: miner tip {}/{} is NOT canonical ({}/{})", + "Relayer: miner tip {}/{} is NOT canonical ({stacks_tip_consensus_hash}/{stacks_tip_block_hash})", &mtip.consensus_hash, &mtip.block_hash, - &stacks_tip_consensus_hash, - &stacks_tip_block_hash ); miner_tip = None; } else { @@ -3553,14 +3498,11 @@ impl RelayerThread { /// cost since we won't 
be mining it anymore.
    fn setup_microblock_mining_state(&mut self, new_miner_tip: Option<MinerTip>) {
        // update state
-        let my_miner_tip = std::mem::replace(&mut self.miner_tip, None);
+        let my_miner_tip = std::mem::take(&mut self.miner_tip);
        let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone());
        if best_tip == new_miner_tip && best_tip != my_miner_tip {
            // tip has changed
-            debug!(
-                "Relayer: Best miner tip went from {:?} to {:?}",
-                &my_miner_tip, &new_miner_tip
-            );
+            debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}");
            self.microblock_stream_cost = ExecutionCost::zero();
        }
        self.miner_tip = best_tip;
@@ -3597,7 +3539,7 @@ impl RelayerThread {
            BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
                public_key: vrf_public_key,
                memo,
-                consensus_hash: consensus_hash.clone(),
+                consensus_hash: *consensus_hash,
                vtxindex: 0,
                txid: Txid([0u8; 32]),
                block_height: 0,
@@ -3660,14 +3602,14 @@ impl RelayerThread {
        for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() {
            if assembled_block.burn_block_height < burn_height {
                debug!(
-                    "Stale mined block: {} (as of {},{})",
-                    &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height
+                    "Stale mined block: {stacks_bhh} (as of {},{})",
+                    &assembled_block.burn_hash, assembled_block.burn_block_height
                );
                continue;
            }
            debug!(
-                "Mined block in-flight: {} (as of {},{})",
-                &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height
+                "Mined block in-flight: {stacks_bhh} (as of {},{})",
+                &assembled_block.burn_hash, assembled_block.burn_block_height
            );
            ret.insert(stacks_bhh, (assembled_block, microblock_privkey));
        }
@@ -3676,14 +3618,14 @@
    /// Create the block miner thread state.
/// Only proceeds if all of the following are true: - /// * the miner is not blocked - /// * last_burn_block corresponds to the canonical sortition DB's chain tip - /// * the time of issuance is sufficiently recent - /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) - /// * a miner thread is not running already + /// * The miner is not blocked + /// * `last_burn_block` corresponds to the canonical sortition DB's chain tip + /// * The time of issuance is sufficiently recent + /// * There are no unprocessed stacks blocks in the staging DB + /// * The relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * A miner thread is not running already fn create_block_miner( &mut self, registered_key: RegisteredKey, @@ -3724,16 +3666,15 @@ impl RelayerThread { } } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = last_burn_block.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return None; @@ -3749,8 +3690,7 @@ impl RelayerThread { ); if has_unprocessed { debug!( - "Relayer: Drop RunTenure for {} because there are fewer than {} pending blocks", - &burn_header_hash, + "Relayer: Drop RunTenure for {burn_header_hash} because there are fewer than {} pending blocks", self.burnchain.pox_constants.prepare_length - 1 ); return None; @@ -3780,7 +3720,7 @@ impl RelayerThread { // if we're still mining on this burn block, then do nothing if self.miner_thread.is_some() { - debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {}", &burn_chain_tip); + debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {burn_chain_tip}"); return None; } @@ -3797,6 +3737,7 @@ impl RelayerThread { /// Try to start up a block miner thread with this given VRF key and current burnchain tip. /// Returns true if the thread was started; false if it was not (for any reason) + #[allow(clippy::incompatible_msrv)] pub fn block_miner_thread_try_start( &mut self, registered_key: RegisteredKey, @@ -3827,7 +3768,7 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = miner_thread_state.send_mock_miner_messages() { - warn!("Failed to send mock miner messages: {}", e); + warn!("Failed to send mock miner messages: {e}"); } miner_thread_state.run_tenure() }) @@ -3898,11 +3839,13 @@ impl RelayerThread { true } - /// Start up a microblock miner thread if we can: - /// * no miner thread must be running already - /// * the miner must not be blocked - /// * we must have won the sortition on the stacks chain tip - /// Returns true if the thread was started; false if not. 
+    /// Start up a microblock miner thread if possible:
+    /// * No miner thread must be running already
+    /// * The miner must not be blocked
+    /// * We must have won the sortition on the Stacks chain tip
+    ///
+    /// Returns `true` if the thread was started; `false` if not.
+    #[allow(clippy::incompatible_msrv)]
    pub fn microblock_miner_thread_try_start(&mut self) -> bool {
        let miner_tip = match self.miner_tip.as_ref() {
            Some(tip) => tip.clone(),
@@ -3948,10 +3891,7 @@ impl RelayerThread {
        let parent_consensus_hash = &miner_tip.consensus_hash;
        let parent_block_hash = &miner_tip.block_hash;
-        debug!(
-            "Relayer: Run microblock tenure for {}/{}",
-            parent_consensus_hash, parent_block_hash
-        );
+        debug!("Relayer: Run microblock tenure for {parent_consensus_hash}/{parent_block_hash}");
        let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self)
        else {
@@ -4003,8 +3943,7 @@ impl RelayerThread {
                last_mined_block.burn_block_height,
                &self.last_mined_blocks,
            )
-            .len()
-                == 0
+            .is_empty()
            {
                // first time we've mined a block in this burnchain block
                debug!(
@@ -4019,8 +3958,8 @@ impl RelayerThread {
                &last_mined_block.anchored_block.block_hash()
            );
-            let bhh = last_mined_block.burn_hash.clone();
-            let orig_bhh = last_mined_block.orig_burn_hash.clone();
+            let bhh = last_mined_block.burn_hash;
+            let orig_bhh = last_mined_block.orig_burn_hash;
            let tenure_begin = last_mined_block.tenure_begin;
            self.last_mined_blocks.insert(
@@ -4033,11 +3972,9 @@ impl RelayerThread {
                .set_ongoing_commit(ongoing_commit_opt);
            debug!(
-                "Relayer: RunTenure finished at {} (in {}ms) targeting {} (originally {})",
+                "Relayer: RunTenure finished at {} (in {}ms) targeting {bhh} (originally {orig_bhh})",
                self.last_tenure_issue_time,
-                self.last_tenure_issue_time.saturating_sub(tenure_begin),
-                &bhh,
-                &orig_bhh
+                self.last_tenure_issue_time.saturating_sub(tenure_begin)
            );
            // this stacks block confirms all in-flight microblocks we know about,
@@ -4058,7 +3995,7 @@ impl RelayerThread {
                        let num_mblocks = chainstate
                            .unconfirmed_state
                            .as_ref()
-                            .map(|ref unconfirmed| unconfirmed.num_microblocks())
+                            .map(|unconfirmed| unconfirmed.num_microblocks())
                            .unwrap_or(0);
                        (processed_unconfirmed_state, num_mblocks)
@@ -4066,11 +4003,9 @@ impl RelayerThread {
                );
                info!(
-                    "Mined one microblock: {} seq {} txs {} (total processed: {})",
-                    &microblock_hash,
+                    "Mined one microblock: {microblock_hash} seq {} txs {} (total processed: {num_mblocks})",
                    next_microblock.header.sequence,
-                    next_microblock.txs.len(),
-                    num_mblocks
+                    next_microblock.txs.len()
                );
                self.globals.counters.set_microblocks_processed(num_mblocks);
@@ -4090,8 +4025,7 @@ impl RelayerThread {
                    next_microblock,
                ) {
                    error!(
-                        "Failure trying to broadcast microblock {}: {}",
-                        microblock_hash, e
+                        "Failure trying to broadcast microblock {microblock_hash}: {e}"
                    );
                }
@@ -4116,7 +4050,7 @@ impl RelayerThread {
                self.mined_stacks_block = false;
            }
            Err(e) => {
-                warn!("Relayer: Failed to mine next microblock: {:?}", &e);
+                warn!("Relayer: Failed to mine next microblock: {e:?}");
                // switch back to block mining
                self.mined_stacks_block = false;
@@ -4134,14 +4068,16 @@ impl RelayerThread {
        None
    }
-    /// Try to join with the miner thread. If we succeed, join the thread and return true.
-    /// Otherwise, if the thread is still running, return false;
+    /// Try to join with the miner thread. If successful, join the thread and return `true`.
+    /// Otherwise, if the thread is still running, return `false`.
+    ///
    /// Updates internal state gleaned from the miner, such as:
-    /// * new stacks block data
-    /// * new keychain state
-    /// * new metrics
-    /// * new unconfirmed state
-    /// Returns true if joined; false if not.
+    /// * New Stacks block data
+    /// * New keychain state
+    /// * New metrics
+    /// * New unconfirmed state
+    ///
+    /// Returns `true` if joined; `false` if not.
    pub fn miner_thread_try_join(&mut self) -> bool {
        if let Some(thread_handle) = self.miner_thread.take() {
            let new_thread_handle = self.inner_miner_thread_try_join(thread_handle);
@@ -4155,28 +4091,22 @@ impl RelayerThread {
        let mut f = match fs::File::open(path) {
            Ok(f) => f,
            Err(e) => {
-                warn!("Could not open {}: {:?}", &path, &e);
+                warn!("Could not open {path}: {e:?}");
                return None;
            }
        };
        let mut registered_key_bytes = vec![];
        if let Err(e) = f.read_to_end(&mut registered_key_bytes) {
-            warn!(
-                "Failed to read registered key bytes from {}: {:?}",
-                path, &e
-            );
+            warn!("Failed to read registered key bytes from {path}: {e:?}");
            return None;
        }
        let Ok(registered_key) = serde_json::from_slice(&registered_key_bytes) else {
-            warn!(
-                "Did not load registered key from {}: could not decode JSON",
-                &path
-            );
+            warn!("Did not load registered key from {path}: could not decode JSON");
            return None;
        };
-        info!("Loaded registered key from {}", &path);
+        info!("Loaded registered key from {path}");
        Some(registered_key)
    }
@@ -4193,7 +4123,7 @@ impl RelayerThread {
            RelayerDirective::RegisterKey(last_burn_block) => {
                let mut saved_key_opt = None;
                if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() {
-                    saved_key_opt = Self::load_saved_vrf_key(&path);
+                    saved_key_opt = Self::load_saved_vrf_key(path);
                }
                if let Some(saved_key) = saved_key_opt {
                    self.globals.resume_leader_key(saved_key);
@@ -4266,8 +4196,8 @@ impl ParentStacksBlockInfo {
    ) -> Result<ParentStacksBlockInfo, Error> {
        let stacks_tip_header = StacksChainState::get_anchored_block_header_info(
            chain_state.db(),
-            &mine_tip_ch,
-            &mine_tip_bh,
+            mine_tip_ch,
+            mine_tip_bh,
        )
        .unwrap()
        .ok_or_else(|| {
@@ -4335,9 +4265,9 @@ impl ParentStacksBlockInfo {
            return Err(Error::BurnchainTipChanged);
        }
-        debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})",
+        debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {mine_tip_ch} (height {} hash {})",
               &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash,
-               mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash);
+               parent_snapshot.block_height, &parent_snapshot.burn_header_hash);
        let coinbase_nonce = {
            let principal = miner_address.into();
@@ -4349,8 +4279,7 @@ impl ParentStacksBlockInfo {
            )
            .unwrap_or_else(|| {
                panic!(
-                    "BUG: stacks tip block {}/{} no longer exists after we queried it",
-                    mine_tip_ch, mine_tip_bh
+                    "BUG: stacks tip block {mine_tip_ch}/{mine_tip_bh} no longer exists after we queried it"
                )
            });
            account.nonce
@@ -4358,9 +4287,9 @@ impl ParentStacksBlockInfo {
        Ok(ParentStacksBlockInfo {
            stacks_parent_header: stacks_tip_header,
-            parent_consensus_hash: mine_tip_ch.clone(),
+            parent_consensus_hash: *mine_tip_ch,
            parent_block_burn_height: parent_block_height,
-            parent_block_total_burn: parent_block_total_burn,
+            parent_block_total_burn,
            parent_winning_vtxindex,
            coinbase_nonce,
        })
@@ -4412,16 +4341,14 @@ impl PeerThread {
            .make_cost_metric()
            .unwrap_or_else(|| Box::new(UnitMetric));
-        let mempool = MemPoolDB::open(
+        MemPoolDB::open(
            config.is_mainnet(),
            config.burnchain.chain_id,
            &config.get_chainstate_path_str(),
            cost_estimator,
            metric,
        )
-        .expect("Database failure opening mempool");
-
-        mempool
+        .expect("Database failure opening mempool")
    }
    /// Instantiate the p2p thread.
@@ -4531,6 +4458,7 @@ impl PeerThread {
    /// Run one pass of the p2p/http state machine
    /// Return true if we should continue running passes; false if not
+    #[allow(clippy::borrowed_box)]
    pub fn run_one_pass<B: BurnchainHeaderReader>(
        &mut self,
        indexer: &B,
@@ -4542,12 +4470,11 @@ impl PeerThread {
    ) -> bool {
        // initial block download?
        let ibd = self.globals.sync_comms.get_ibd();
-        let download_backpressure = self.results_with_data.len() > 0;
+        let download_backpressure = !self.results_with_data.is_empty();
        let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() {
            // keep getting those blocks -- drive the downloader state-machine
            debug!(
-                "P2P: backpressure: {}, more downloads: {}",
-                download_backpressure,
+                "P2P: backpressure: {download_backpressure}, more downloads: {}",
                self.get_network().has_more_downloads()
            );
            1
@@ -4566,11 +4493,7 @@ impl PeerThread {
        // NOTE: handler_args must be created such that it outlives the inner net.run() call and
        // doesn't ref anything within p2p_thread.
        let handler_args = RPCHandlerArgs {
-            exit_at_block_height: p2p_thread
-                .config
-                .burnchain
-                .process_exit_at_block_height
-                .clone(),
+            exit_at_block_height: p2p_thread.config.burnchain.process_exit_at_block_height,
            genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH)
                .unwrap(),
            event_observer: Some(event_dispatcher),
@@ -4635,7 +4558,7 @@ impl PeerThread {
            Err(e) => {
                // this is only reachable if the network is not instantiated correctly --
                // i.e. you didn't connect it
-                panic!("P2P: Failed to process network dispatch: {:?}", &e);
+                panic!("P2P: Failed to process network dispatch: {e:?}");
            }
        };
@@ -4697,9 +4620,8 @@ impl StacksNode {
    pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) {
        if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height {
            info!(
-                "Override burnchain height of {:?} to {}",
-                ASTRules::PrecheckSize,
-                ast_precheck_size_height
+                "Override burnchain height of {:?} to {ast_precheck_size_height}",
+                ASTRules::PrecheckSize
            );
            let mut tx = sortdb
                .tx_begin()
@@ -4726,32 +4648,32 @@ impl StacksNode {
            .make_cost_metric()
            .unwrap_or_else(|| Box::new(UnitMetric));
-        let mempool = MemPoolDB::open(
+        MemPoolDB::open(
            config.is_mainnet(),
            config.burnchain.chain_id,
            &config.get_chainstate_path_str(),
            cost_estimator,
            metric,
        )
-        .expect("BUG: failed to instantiate mempool");
-
-        mempool
+        .expect("BUG: failed to instantiate mempool")
    }
-    /// Set up the Peer DB and update any soft state from the config file. This includes:
-    /// * blacklisted/whitelisted nodes
-    /// * node keys
-    /// * bootstrap nodes
-    /// Returns the instantiated PeerDB
+    /// Set up the Peer DB and update any soft state from the config file. This includes:
+    /// * Blacklisted/whitelisted nodes
+    /// * Node keys
+    /// * Bootstrap nodes
+    ///
+    /// Returns the instantiated `PeerDB`.
+    ///
    /// Panics on failure.
fn setup_peer_db( config: &Config, burnchain: &Burnchain, stackerdb_contract_ids: &[QualifiedContractIdentifier], ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(config.node.data_url.to_string()).unwrap(); let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { + if !initial_neighbors.is_empty() { info!( "Will bootstrap from peers {}", VecDisplay(&initial_neighbors) @@ -4778,7 +4700,7 @@ impl StacksNode { config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), + config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -4787,23 +4709,19 @@ impl StacksNode { stackerdb_contract_ids, ) .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); + eprintln!("Failed to open {}: {e:?}", &config.get_peer_db_file_path()); panic!(); }) .unwrap(); // allow all bootstrap nodes { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for initial_neighbor in initial_neighbors.iter() { // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::update_peer(&tx, initial_neighbor).unwrap(); PeerDB::set_allow_peer( - &mut tx, + &tx, initial_neighbor.addr.network_id, &initial_neighbor.addr.addrbytes, initial_neighbor.addr.port, @@ -4820,10 +4738,10 @@ impl StacksNode { // deny all config-denied peers { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -4836,9 +4754,9 @@ impl StacksNode { // update services to indicate we can support mempool sync and stackerdb { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); PeerDB::set_local_services( - &mut tx, + &tx, (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16) | (ServiceFlags::STACKERDB as u16), @@ -4867,7 +4785,7 @@ impl StacksNode { .expect("Error while loading stacks epochs"); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() @@ -4908,16 +4826,20 @@ impl StacksNode { stackerdb_machines.insert(contract_id, (stackerdb_config, stacker_db_sync)); } let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, _ => panic!("Unable to retrieve local peer"), }; - let p2p_net = PeerNetwork::new( + PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, config.burnchain.peer_version, burnchain, @@ -4925,9 +4847,7 @@ impl StacksNode { config.connection_options.clone(), stackerdb_machines, epochs, - ); - - p2p_net + ) } /// Main loop of the relayer. 
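NOTE: several relayer hunks earlier in this patch swap `std::mem::replace(&mut x, None)` (or an explicit empty value such as `MinedBlocks::new()`) for `std::mem::take`. A minimal sketch of the equivalence, assuming only that the field's type implements `Default`:

```rust
use std::mem;

fn main() {
    let mut miner_tip: Option<String> = Some("tip".to_string());
    // Before: let old = mem::replace(&mut miner_tip, None);
    let old = mem::take(&mut miner_tip); // moves the value out, leaves Default (None)
    assert_eq!(old.as_deref(), Some("tip"));
    assert!(miner_tip.is_none());
}
```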
@@ -5042,7 +4962,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -5223,9 +5143,9 @@ impl StacksNode { .globals .relay_send .send(RelayerDirective::ProcessTenure( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) .is_ok(); } @@ -5266,17 +5186,15 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash + ); } } @@ -5289,8 +5207,7 @@ impl StacksNode { let num_key_registers = key_registers.len(); debug!( - "Processed burnchain state at height {}: {} leader keys, {} block-commits (ibd = {})", - block_height, num_key_registers, num_block_commits, ibd + "Processed burnchain state at height {block_height}: {num_key_registers} leader keys, {num_block_commits} block-commits (ibd = {ibd})" ); // save the registered VRF key @@ -5306,28 +5223,28 @@ impl StacksNode { return ret; }; - info!("Activated VRF key; saving to {}", &path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); return ret; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return ret; } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + if let Err(e) = f.write_all(key_json.as_bytes()) { + warn!("Failed to write activated VRF key to {path}: {e:?}"); return ret; } - info!("Saved activated VRF key to {}", &path); - return ret; + info!("Saved activated VRF key to {path}"); + ret } /// Join all inner threads diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 1895912ba5..3636223b3f 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -151,6 +151,7 @@ pub fn get_names(use_test_chainstate_data: bool) -> Box x, Err(e) => { - warn!("Error while connecting burnchain db in peer loop: {}", e); + warn!("Error while connecting burnchain db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -203,7 +204,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting chainstate db in peer loop: {}", e); + warn!("Error while connecting chainstate db in peer loop: {e}"); 
thread::sleep(time::Duration::from_secs(1)); continue; } @@ -221,7 +222,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting to mempool db in peer loop: {}", e); + warn!("Error while connecting to mempool db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -268,7 +269,7 @@ pub fn use_test_genesis_chainstate(config: &Config) -> bool { impl Node { /// Instantiate and initialize a new node, given a config - pub fn new(config: Config, boot_block_exec: Box ()>) -> Self { + pub fn new(config: Config, boot_block_exec: Box) -> Self { let use_test_genesis_data = if config.burnchain.mode == "mocknet" { use_test_genesis_chainstate(&config) } else { @@ -318,9 +319,8 @@ impl Node { let (chain_state, receipts) = match chain_state_result { Ok(res) => res, Err(err) => panic!( - "Error while opening chain state at path {}: {:?}", - config.get_chainstate_path_str(), - err + "Error while opening chain state at path {}: {err:?}", + config.get_chainstate_path_str() ), }; @@ -407,18 +407,18 @@ impl Node { Config::assert_valid_epoch_settings(&burnchain, &epochs); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() }; // create a new peerdb - let data_url = UrlString::try_from(format!("{}", self.config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(self.config.node.data_url.to_string()).unwrap(); let initial_neighbors = self.config.node.bootstrap_node.clone(); - println!("BOOTSTRAP WITH {:?}", initial_neighbors); + println!("BOOTSTRAP WITH {initial_neighbors:?}"); let rpc_sock: SocketAddr = self.config.node.rpc_bind.parse().unwrap_or_else(|_| { @@ -452,7 +452,7 @@ impl Node { self.config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - self.config.connection_options.private_key_lifetime.clone(), + self.config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -464,10 +464,10 @@ impl Node { println!("DENY NEIGHBORS {:?}", &self.config.node.deny_nodes); { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in self.config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -488,12 +488,16 @@ impl Node { }; let event_dispatcher = self.event_dispatcher.clone(); - let exit_at_block_height = self.config.burnchain.process_exit_at_block_height.clone(); + let exit_at_block_height = self.config.burnchain.process_exit_at_block_height; + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let p2p_net = PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, self.config.burnchain.peer_version, burnchain.clone(), @@ -577,9 +581,9 @@ impl Node { // Registered key has been mined new_key = Some(RegisteredKey { vrf_public_key: op.public_key.clone(), - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, - target_block_height: (op.block_height as u64) - 1, + block_height: op.block_height, + op_vtxindex: op.vtxindex, + target_block_height: op.block_height - 1, memo: op.memo.clone(), }); } @@ -649,7 +653,7 @@ impl Node { burnchain.pox_constants, ) .expect("Error while opening sortition db"); - let tip = 
SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())
+        let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
            .expect("FATAL: failed to query canonical burn chain tip");
        // Generates a proof out of the sortition hash provided in the params.
@@ -734,7 +738,7 @@ impl Node {
            anchored_block_from_ongoing_tenure.header.block_hash(),
            burn_fee,
            &registered_key,
-            &burnchain_tip,
+            burnchain_tip,
            VRFSeed::from_proof(&vrf_proof),
        );
@@ -784,15 +788,13 @@ impl Node {
        )
        .unwrap_or_else(|_| {
            panic!(
-                "BUG: could not query chainstate to find parent consensus hash of {}/{}",
-                consensus_hash,
+                "BUG: could not query chainstate to find parent consensus hash of {consensus_hash}/{}",
                &anchored_block.block_hash()
            )
        })
        .unwrap_or_else(|| {
            panic!(
-                "BUG: no such parent of block {}/{}",
-                consensus_hash,
+                "BUG: no such parent of block {consensus_hash}/{}",
                &anchored_block.block_hash()
            )
        });
@@ -802,7 +804,7 @@ impl Node {
            .preprocess_anchored_block(
                &ic,
                consensus_hash,
-                &anchored_block,
+                anchored_block,
                &parent_consensus_hash,
                0,
            )
@@ -813,7 +815,7 @@ impl Node {
        let res = self
            .chain_state
            .preprocess_streamed_microblock(
-                &consensus_hash,
+                consensus_hash,
                &anchored_block.block_hash(),
                microblock,
            )
@@ -847,33 +849,30 @@ impl Node {
            )
        };
        match process_blocks_at_tip {
-            Err(e) => panic!("Error while processing block - {:?}", e),
+            Err(e) => panic!("Error while processing block - {e:?}"),
            Ok(ref mut blocks) => {
-                if blocks.len() == 0 {
+                if blocks.is_empty() {
                    break;
                } else {
                    for block in blocks.iter() {
-                        match block {
-                            (Some(epoch_receipt), _) => {
-                                let attachments_instances =
-                                    self.get_attachment_instances(epoch_receipt, &atlas_config);
-                                if !attachments_instances.is_empty() {
-                                    for new_attachment in attachments_instances.into_iter() {
-                                        if let Err(e) =
-                                            atlas_db.queue_attachment_instance(&new_attachment)
-                                        {
-                                            warn!(
-                                                "Atlas: Error writing attachment instance to DB";
-                                                "err" => ?e,
-                                                "index_block_hash" => %new_attachment.index_block_hash,
-                                                "contract_id" => %new_attachment.contract_id,
-                                                "attachment_index" => %new_attachment.attachment_index,
-                                            );
-                                        }
+                        if let (Some(epoch_receipt), _) = block {
+                            let attachments_instances =
+                                self.get_attachment_instances(epoch_receipt, &atlas_config);
+                            if !attachments_instances.is_empty() {
+                                for new_attachment in attachments_instances.into_iter() {
+                                    if let Err(e) =
+                                        atlas_db.queue_attachment_instance(&new_attachment)
+                                    {
+                                        warn!(
+                                            "Atlas: Error writing attachment instance to DB";
+                                            "err" => ?e,
+                                            "index_block_hash" => %new_attachment.index_block_hash,
+                                            "contract_id" => %new_attachment.contract_id,
+                                            "attachment_index" => %new_attachment.attachment_index,
+                                        );
                                    }
                                }
                            }
-                            _ => {}
                        }
                    }
@@ -990,7 +989,7 @@ impl Node {
            BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
                public_key: vrf_public_key,
                memo: vec![],
-                consensus_hash: consensus_hash.clone(),
+                consensus_hash: *consensus_hash,
                vtxindex: 1,
                txid,
                block_height: 0,
diff --git a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs
index 4680098d2b..7e26fb42e2 100644
--- a/testnet/stacks-node/src/operations.rs
+++ b/testnet/stacks-node/src/operations.rs
@@ -31,8 +31,7 @@ impl BurnchainOpSigner {
    }
    pub fn get_public_key(&mut self) -> Secp256k1PublicKey {
-        let public_key = Secp256k1PublicKey::from_private(&self.secret_key);
-        public_key
+        Secp256k1PublicKey::from_private(&self.secret_key)
    }
    pub fn sign_message(&mut self, hash: &[u8]) -> Option<MessageSignature> {
@@ -44,7 +43,7 @@ impl BurnchainOpSigner {
        let signature = match self.secret_key.sign(hash) {
            Ok(r) => r,
            Err(e)
=> { - debug!("Secret key error: {:?}", &e); + debug!("Secret key error: {e:?}"); return None; } }; diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs index 85ace37fa4..2333167334 100644 --- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs @@ -137,8 +137,8 @@ impl BootRunLoop { /// node depending on the current burnchain height. pub fn start(&mut self, burnchain_opt: Option, mine_start: u64) { match self.active_loop { - InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start), - InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start), + InnerLoops::Epoch2(_) => self.start_from_neon(burnchain_opt, mine_start), + InnerLoops::Epoch3(_) => self.start_from_naka(burnchain_opt, mine_start), } } @@ -227,7 +227,7 @@ impl BootRunLoop { // if loop exited, do the transition info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop"); neon_term_switch.store(false, Ordering::SeqCst); - return true + true }) } diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs index 2922ce584a..c61581553c 100644 --- a/testnet/stacks-node/src/run_loop/helium.rs +++ b/testnet/stacks-node/src/run_loop/helium.rs @@ -21,10 +21,7 @@ impl RunLoop { } /// Sets up a runloop and node, given a config. - pub fn new_with_boot_exec( - config: Config, - boot_exec: Box ()>, - ) -> Self { + pub fn new_with_boot_exec(config: Config, boot_exec: Box) -> Self { // Build node based on config let node = Node::new(config.clone(), boot_exec); @@ -174,17 +171,14 @@ impl RunLoop { None => None, }; - match artifacts_from_tenure { - Some(ref artifacts) => { - // Have each node receive artifacts from the current tenure - self.node.commit_artifacts( - &artifacts.anchored_block, - &artifacts.parent_block, - &mut burnchain, - artifacts.burn_fee, - ); - } - None => {} + if let Some(artifacts) = &artifacts_from_tenure { + // Have each node receive artifacts from the current tenure + self.node.commit_artifacts( + &artifacts.anchored_block, + &artifacts.parent_block, + &mut burnchain, + artifacts.burn_fee, + ); } let (new_burnchain_tip, _) = burnchain.sync(None)?; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index b2b9aa3f75..7990c04332 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -37,6 +37,7 @@ macro_rules! info_green { }) } +#[allow(clippy::type_complexity)] pub struct RunLoopCallbacks { on_burn_chain_initialized: Option)>, on_new_burn_chain_state: Option, @@ -45,6 +46,12 @@ pub struct RunLoopCallbacks { on_new_tenure: Option, } +impl Default for RunLoopCallbacks { + fn default() -> Self { + Self::new() + } +} + impl RunLoopCallbacks { pub fn new() -> RunLoopCallbacks { RunLoopCallbacks { @@ -125,7 +132,7 @@ impl RunLoopCallbacks { match &tx.payload { TransactionPayload::Coinbase(..) => println!(" Coinbase"), TransactionPayload::SmartContract(contract, ..) 
=> println!(" Publish smart contract\n**************************\n{:?}\n**************************", contract.code_body),
-                TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {} µSTX to {}", amount, recipent),
+                TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transferring {amount} µSTX to {recipent}"),
                _ => println!(" {:?}", tx.payload)
            }
        }
@@ -167,7 +174,7 @@ pub fn announce_boot_receipts(
    event_dispatcher: &mut EventDispatcher,
    chainstate: &StacksChainState,
    pox_constants: &PoxConstants,
-    boot_receipts: &Vec<StacksTransactionReceipt>,
+    boot_receipts: &[StacksTransactionReceipt],
) {
    let block_header_0 = StacksChainState::get_genesis_header_info(chainstate.db())
        .expect("FATAL: genesis block header not stored");
@@ -189,7 +196,7 @@ pub fn announce_boot_receipts(
        Txid([0x00; 32]),
        &[],
        None,
-        block_header_0.burn_header_hash.clone(),
+        block_header_0.burn_header_hash,
        block_header_0.burn_header_height,
        block_header_0.burn_header_timestamp,
        &ExecutionCost::zero(),
diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs
index 3d94b1c351..16f5a12b2d 100644
--- a/testnet/stacks-node/src/run_loop/nakamoto.rs
+++ b/testnet/stacks-node/src/run_loop/nakamoto.rs
@@ -100,7 +100,7 @@ impl RunLoop {
            config,
            globals: None,
            coordinator_channels: Some(channels),
-            counters: counters.unwrap_or_else(|| Counters::new()),
+            counters: counters.unwrap_or_default(),
            should_keep_running,
            event_dispatcher,
            pox_watchdog: None,
@@ -167,9 +167,8 @@ impl RunLoop {
        if self.config.node.miner {
            let keychain = Keychain::default(self.config.node.seed.clone());
            let mut op_signer = keychain.generate_op_signer();
-            match burnchain.create_wallet_if_dne() {
-                Err(e) => warn!("Error when creating wallet: {:?}", e),
-                _ => {}
+            if let Err(e) = burnchain.create_wallet_if_dne() {
+                warn!("Error when creating wallet: {e:?}");
            }
            let mut btc_addrs = vec![(
                StacksEpochId::Epoch2_05,
@@ -285,7 +284,6 @@ impl RunLoop {
        let mut atlas_config = AtlasConfig::new(self.config.is_mainnet());
        let genesis_attachments = GenesisData::new(use_test_genesis_data)
            .read_name_zonefiles()
-            .into_iter()
            .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec()))
            .collect();
        atlas_config.genesis_attachments = Some(genesis_attachments);
@@ -296,7 +294,7 @@ impl RunLoop {
        let moved_atlas_config = self.config.atlas.clone();
        let moved_config = self.config.clone();
        let moved_burnchain_config = burnchain_config.clone();
-        let mut coordinator_dispatcher = self.event_dispatcher.clone();
+        let coordinator_dispatcher = self.event_dispatcher.clone();
        let atlas_db = AtlasDB::connect(
            moved_atlas_config.clone(),
            &self.config.get_atlas_db_file_path(),
@@ -325,13 +323,12 @@ impl RunLoop {
                require_affirmed_anchor_blocks: moved_config
                    .node
                    .require_affirmed_anchor_blocks,
-                ..ChainsCoordinatorConfig::new()
            };
            ChainsCoordinator::run(
                coord_config,
                chain_state_db,
                moved_burnchain_config,
-                &mut coordinator_dispatcher,
+                &coordinator_dispatcher,
                coordinator_receivers,
                moved_atlas_config,
                cost_estimator.as_deref_mut(),
@@ -382,7 +379,7 @@ impl RunLoop {
            Some(sn) => sn,
            None => {
                debug!("No canonical stacks chain tip hash present");
-                let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn())
+                let sn = SortitionDB::get_first_block_snapshot(sortdb.conn())
                    .expect("BUG: failed to get first-ever block snapshot");
                sn
            }
@@ -432,7 +429,7 @@ impl RunLoop {
                return;
            }
            Err(e) => {
-                error!("Error initializing burnchain: {}", e);
+                error!("Error initializing burnchain: {e}");
                info!("Exiting
stacks-node"); return; } @@ -477,7 +474,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -525,10 +522,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; let mut poll_deadline = 0; @@ -576,11 +570,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -601,7 +594,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -615,15 +608,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -669,8 +660,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -702,7 +692,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: 
Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -712,9 +702,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -725,13 +713,11 @@ impl RunLoop { if last_tenure_sortition_height != sortition_db_height { if is_miner { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" ); } else { info!( - "Runloop: Synchronized full burnchain up to height {}.", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}." ); } last_tenure_sortition_height = sortition_db_height; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a18a61988b..5e021e50ab 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -93,6 +93,19 @@ impl Default for TestFlag { } } +#[cfg(test)] +impl TestFlag { + /// Set the test flag to the given value + pub fn set(&self, value: bool) { + *self.0.lock().unwrap() = Some(value); + } + + /// Get the test flag value. Defaults to false if the flag is not set. + pub fn get(&self) -> bool { + self.0.lock().unwrap().unwrap_or(false) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -342,7 +355,7 @@ impl RunLoop { } } _ => { - let msg = format!("Graceful termination request received (signal `{}`), will complete the ongoing runloop cycles and terminate\n", sig_id); + let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n"); async_safe_write_stderr(&msg); keep_running_writer.store(false, Ordering::SeqCst); } @@ -353,7 +366,7 @@ impl RunLoop { if cfg!(test) || allow_err { info!("Error setting up signal handler, may have already been set"); } else { - panic!("FATAL: error setting termination handler - {}", e); + panic!("FATAL: error setting termination handler - {e}"); } } } @@ -369,9 +382,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -462,7 +474,7 @@ impl RunLoop { panic!(); } Err(e) => { - panic!("FATAL: unable to query filesystem or databases: {:?}", &e); + panic!("FATAL: unable to query filesystem or databases: {e:?}"); } } @@ -476,13 +488,13 @@ impl RunLoop { Some(burnchain_tip) => { // database exists already, and has blocks -- just sync to its tip. 
let target_height = burnchain_tip.block_height + 1; - debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {}", burnchain_tip.block_height, target_height); + debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height); target_height } None => { // database does not exist yet let target_height = 1.max(burnchain_config.first_block_height + 1); - debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {}", target_height); + debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}"); target_height } }; @@ -490,22 +502,19 @@ impl RunLoop { burnchain_controller .start(Some(target_burnchain_block_height)) .map_err(|e| { - match e { - Error::CoordinatorClosed => { - if !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); - return burnchain_error::ShutdownInitiated; - } - } - _ => {} + if matches!(e, Error::CoordinatorClosed) + && !should_keep_running.load(Ordering::SeqCst) + { + info!("Shutdown initiated during burnchain initialization: {e}"); + return burnchain_error::ShutdownInitiated; } - error!("Burnchain controller stopped: {}", e); + error!("Burnchain controller stopped: {e}"); panic!(); })?; // if the chainstate DBs don't exist, this will instantiate them if let Err(e) = burnchain_controller.connect_dbs() { - error!("Failed to connect to burnchain databases: {}", e); + error!("Failed to connect to burnchain databases: {e}"); panic!(); }; @@ -581,7 +590,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -592,7 +600,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -621,13 +629,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -685,7 +692,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -737,7 +744,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -745,7 +752,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ 
-761,7 +768,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -787,26 +794,24 @@ impl RunLoop { .find_divergence(&heaviest_affirmation_map) .is_some() { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})"); globals.coord().announce_new_burn_block(); } else if highest_sn.block_height == sn.block_height && sn.block_height == canonical_burnchain_tip.block_height { // need to force an affirmation reorg because there will be no more burn block // announcements. - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, burn height {})", sn.block_height); globals.coord().announce_new_burn_block(); } debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: possible PoX reorg (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); globals.coord().announce_new_stacks_block(); } else { debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: no need (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); // announce a new stacks block to force the chains coordinator @@ -877,7 +882,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -885,7 +890,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -893,22 +898,22 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, - &chain_state_db, + chain_state_db, &sn.sortition_id, ) { Ok(am) => am, Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); + warn!("Failed to find canonical affirmation map: {e:?}"); return; } }; @@ -919,7 +924,7 @@ impl RunLoop { .is_some() || sn.block_height < highest_sn.block_height { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = (heaviest_affirmation_map.len() as u64) { // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, 
canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, canonical: {canonical_affirmation_map})"); globals.coord().announce_new_burn_block(); globals.coord().announce_new_stacks_block(); *last_announce_time = get_epoch_time_secs().into(); @@ -939,9 +944,7 @@ impl RunLoop { } } else { debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} ` so that data can be passed to `NakamotoNode` pub fn start( @@ -1063,7 +1064,7 @@ impl RunLoop { return None; } Err(e) => { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return None; } @@ -1109,7 +1110,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -1137,7 +1138,7 @@ impl RunLoop { .tx_begin() .expect("FATAL: failed to begin burnchain DB tx"); for (reward_cycle, affirmation) in self.config.burnchain.affirmation_overrides.iter() { - tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).expect(&format!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); + tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).unwrap_or_else(|_| panic!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); } tx.commit() .expect("FATAL: failed to commit burnchain DB tx"); @@ -1168,10 +1169,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1210,7 +1208,7 @@ impl RunLoop { ) { Ok(ibd) => ibd, Err(e) => { - debug!("Runloop: PoX sync wait routine aborted: {:?}", e); + debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; } }; @@ -1231,11 +1229,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. 
debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -1250,7 +1247,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -1264,15 +1261,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -1345,8 +1340,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -1378,7 +1372,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -1388,9 +1382,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -1401,8 +1393,7 @@ impl RunLoop { // at tip, and not downloading. proceed to mine. 
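Editor's note: most hunks in this file are one mechanical change, inlining format arguments into the format string. A standalone illustration (not PR code) of the rule being applied:

```rust
fn main() {
    let burnchain_height = 240u64;
    let target_burnchain_block_height = 245u64;

    // Positional style being replaced throughout the diff:
    println!(
        "Downloaded burnchain blocks up to height {}; target height is {}",
        burnchain_height, target_burnchain_block_height
    );

    // Inline captured identifiers (stable since Rust 1.58):
    println!(
        "Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}"
    );

    // Only bare identifiers can be captured; expressions still need a
    // positional `{}`, which is why calls like
    // block_height_to_reward_cycle(...) remain positional arguments above.
    println!("reward cycle #{}", burnchain_height / 5);
}
```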
if last_tenure_sortition_height != sortition_db_height { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" ); last_tenure_sortition_height = sortition_db_height; } diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs index f63b17a6ab..d7ec349466 100644 --- a/testnet/stacks-node/src/stacks_events.rs +++ b/testnet/stacks-node/src/stacks_events.rs @@ -22,10 +22,7 @@ fn main() { if help { println!("Usage: stacks-events [--addr=<addr>]"); - println!( - " --addr=<addr> Address to listen on (default: {})", - DEFAULT_ADDR - ); + println!(" --addr=<addr> Address to listen on (default: {DEFAULT_ADDR})",); return; } @@ -34,7 +31,7 @@ fn main() { fn serve_for_events(addr: &String) { let listener = TcpListener::bind(addr).unwrap(); - eprintln!("Listening on {}", addr); + eprintln!("Listening on {addr}"); for stream in listener.incoming() { let stream = stream.unwrap(); handle_connection(stream); @@ -82,17 +79,16 @@ fn handle_connection(mut stream: TcpStream) { "path": path.unwrap(), "payload": payload_json, }); - println!("{}", record); + println!("{record}"); { let contents = "Thanks!"; let response = format!( - "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}", - contents.len(), - contents + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}", contents.len() ); - stream.write(response.as_bytes()).unwrap(); + let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); stream.flush().unwrap(); } } diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index ff68126a83..395d829c8f 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -69,7 +69,7 @@ impl PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> { @@ -95,7 +95,7 @@ impl PoxSyncWatchdogComms { self.interruptable_sleep(1)?; std::hint::spin_loop(); } - return Ok(true); + Ok(true) } pub fn should_keep_running(&self) -> bool { @@ -180,8 +180,7 @@ impl PoxSyncWatchdog { Ok(cs) => cs, Err(e) => { return Err(format!( - "Failed to open chainstate at '{}': {:?}", - &chainstate_path, &e + "Failed to open chainstate at '{chainstate_path}': {e:?}" )); } }; @@ -192,7 +191,7 @@ impl PoxSyncWatchdog { new_processed_blocks: VecDeque::new(), last_attachable_query: 0, last_processed_query: 0, - max_samples: max_samples, + max_samples, max_staging: 10, watch_start_ts: 0, last_block_processed_ts: 0, @@ -200,7 +199,7 @@ impl PoxSyncWatchdog { estimated_block_process_time: 5.0, steady_state_burnchain_sync_interval: burnchain_poll_time, steady_state_resync_ts: 0, - chainstate: chainstate, + chainstate, relayer_comms: watchdog_comms, }) } @@ -213,11 +212,11 @@ impl PoxSyncWatchdog { fn count_attachable_stacks_blocks(&mut self) -> Result<u64, String> { // number of staging blocks that have arrived since the last sortition let cnt = StacksChainState::count_attachable_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_attachable_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_attachable_query = get_epoch_time_secs(); Ok(cnt) @@ -229,11 +228,11 @@ impl PoxSyncWatchdog { fn count_processed_stacks_blocks(&mut self) -> Result<u64, String>
{ // number of staging blocks that have arrived since the last sortition let cnt = StacksChainState::count_processed_staging_blocks( - &self.chainstate.db(), + self.chainstate.db(), self.max_staging, self.last_processed_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_processed_query = get_epoch_time_secs(); Ok(cnt) @@ -250,13 +249,13 @@ impl PoxSyncWatchdog { last_processed_height + (burnchain.stable_confirmations as u64) < burnchain_height; if ibd { debug!( - "PoX watchdog: {} + {} < {}, so initial block download", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} < {burnchain_height}, so initial block download", + burnchain.stable_confirmations ); } else { debug!( - "PoX watchdog: {} + {} >= {}, so steady-state", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} >= {burnchain_height}, so steady-state", + burnchain.stable_confirmations ); } ibd @@ -281,7 +280,7 @@ impl PoxSyncWatchdog { /// Is a derivative approximately flat, with a maximum absolute deviation from 0? /// Return whether or not the sample is mostly flat, and how many points were over the given /// error bar in either direction. - fn is_mostly_flat(deriv: &Vec, error: i64) -> (bool, usize) { + fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { let mut total_deviates = 0; let mut ret = true; for d in deriv.iter() { @@ -294,7 +293,7 @@ impl PoxSyncWatchdog { } /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &Vec) -> f64 { + fn hilo_filter_avg(samples: &[i64]) -> f64 { // take average with low and high pass let mut min = i64::MAX; let mut max = i64::MIN; @@ -344,7 +343,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -358,7 +357,7 @@ impl PoxSyncWatchdog { } let block_wait_times = - StacksChainState::measure_block_wait_time(&chainstate.db(), start_height, end_height) + StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) .expect("BUG: failed to query chainstate block-processing times"); PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) @@ -372,7 +371,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -386,7 +385,7 @@ impl PoxSyncWatchdog { } let block_download_times = StacksChainState::measure_block_download_time( - &chainstate.db(), + chainstate.db(), start_height, end_height, ) @@ -459,10 +458,7 @@ impl PoxSyncWatchdog { } if self.unconditionally_download { - debug!( - "PoX watchdog set to unconditionally download (ibd={})", - ibbd - ); + debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); self.relayer_comms.set_ibd(ibbd); return 
Ok(ibbd); } @@ -561,7 +557,7 @@ impl PoxSyncWatchdog { && get_epoch_time_secs() < expected_first_block_deadline { // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {}s for first Stacks block download (estimated download time: {}s)...", expected_first_block_deadline, self.estimated_block_download_time); + debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -596,8 +592,8 @@ impl PoxSyncWatchdog { let (flat_processed, processed_deviants) = PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - debug!("PoX watchdog: flat-attachable?: {}, flat-processed?: {}, estimated block-download time: {}s, estimated block-processing time: {}s", - flat_attachable, flat_processed, self.estimated_block_download_time, self.estimated_block_process_time); + debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", + self.estimated_block_download_time, self.estimated_block_process_time); if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { // we're flat-lining -- this may be the end of this cycle @@ -607,8 +603,8 @@ impl PoxSyncWatchdog { if self.last_block_processed_ts > 0 && get_epoch_time_secs() < expected_last_block_deadline { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, expected_last_block_deadline, self.estimated_block_process_time); + debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", + get_epoch_time_secs() + 1, self.estimated_block_process_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -617,8 +613,7 @@ impl PoxSyncWatchdog { // doing initial burnchain block download right now. 
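Editor's note on the `BitcoinCoreError` rewrite in the `bitcoin_regtest.rs` hunk below: deriving `thiserror::Error` generates the `Display` impl that the deleted hand-written block used to provide. A self-contained sketch (assumes the `thiserror` crate is available as a dependency):

```rust
use thiserror::Error;

// Same shape as the diff's BitcoinCoreError: #[error(...)] supplies Display.
#[derive(Debug, Error)]
pub enum BitcoinCoreError {
    #[error("bitcoind spawn failed: {0}")]
    SpawnFailed(String),
    #[error("bitcoind stop failed: {0}")]
    StopFailed(String),
}

fn main() {
    let e = BitcoinCoreError::SpawnFailed("no such binary".into());
    // Display comes from the #[error(...)] attribute...
    assert_eq!(e.to_string(), "bitcoind spawn failed: no such binary");
    // ...and std::error::Error is implemented as well.
    let _as_dyn: &dyn std::error::Error = &e;
}
```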
// only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {}, flat-processed = {}, min-attachable: {}, min-processed: {}", - flat_attachable, flat_processed, &attachable_deviants, &processed_deviants); + debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); if !flat_attachable || !flat_processed { sleep_ms(PER_SAMPLE_WAIT_MS); @@ -645,7 +640,7 @@ } (err_attach, err_processed) => { // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{:?}') and/or processed ('{:?}') staging blocks", &err_attach, &err_processed); + error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); panic!(); } }; diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 5dd67cddab..7322133889 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -41,7 +41,8 @@ pub struct Tenure { parent_block_total_burn: u64, } -impl<'a> Tenure { +impl Tenure { + #[allow(clippy::too_many_arguments)] pub fn new( parent_block: ChainTip, coinbase_tx: StacksTransaction, @@ -82,7 +83,7 @@ impl<'a> Tenure { elapsed = Instant::now().duration_since(self.burnchain_tip.received_at); } - let (mut chain_state, _) = StacksChainState::open( + let (chain_state, _) = StacksChainState::open( self.config.is_mainnet(), self.config.burnchain.chain_id, &self.config.get_chainstate_path_str(), @@ -91,13 +92,13 @@ impl<'a> Tenure { .unwrap(); let (anchored_block, _, _) = StacksBlockBuilder::build_anchored_block( - &mut chain_state, + &chain_state, burn_dbconn, &mut self.mem_pool, &self.parent_block.metadata, self.parent_block_total_burn, self.vrf_proof.clone(), - self.microblock_pubkeyhash.clone(), + self.microblock_pubkeyhash, &self.coinbase_tx, BlockBuilderSettings::limited(), None, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 621f92aa47..3e69ac18cc 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -17,21 +17,14 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum BitcoinCoreError { + #[error("bitcoind spawn failed: {0}")] SpawnFailed(String), + #[error("bitcoind stop failed: {0}")] StopFailed(String), } -impl std::fmt::Display for BitcoinCoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), - Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), - } - } -} - type BitcoinResult<T> = Result<T, BitcoinCoreError>; pub struct BitcoinCoreController { @@ -50,21 +43,18 @@ impl BitcoinCoreController { fn add_rpc_cli_args(&self, command: &mut Command) { command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - match ( + if let (Some(username), Some(password)) = ( &self.config.burnchain.username, &self.config.burnchain.password, ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} + command
.arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } } pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { - std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); + std::fs::create_dir_all(self.config.get_burnchain_path_str()).unwrap(); let mut command = Command::new("bitcoind"); command @@ -111,7 +101,7 @@ impl BitcoinCoreController { } pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { - if let Some(_) = self.bitcoind_process.take() { + if self.bitcoind_process.take().is_some() { let payload = BitcoinRPCRequest { method: "stop".to_string(), params: vec![], @@ -128,8 +118,7 @@ impl BitcoinCoreController { } } else { return Err(BitcoinCoreError::StopFailed(format!( - "Invalid response: {:?}", - res + "Invalid response: {res:?}" ))); } } @@ -224,11 +213,11 @@ fn bitcoind_integration(segwit_flag: bool) { .callbacks .on_new_burn_chain_state(|round, burnchain_tip, chain_tip| { let block = &burnchain_tip.block_snapshot; - let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round as u64 + 1); + let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round + 1); assert_eq!(block.total_burn, expected_total_burn); - assert_eq!(block.sortition, true); - assert_eq!(block.num_sortitions, round as u64 + 1); - assert_eq!(block.block_height, round as u64 + 2003); + assert!(block.sortition); + assert_eq!(block.num_sortitions, round + 1); + assert_eq!(block.block_height, round + 2003); let leader_key = "f888e0cab5c16de8edf72b544a189ece5c0b95cd9178606c970789ac71d17bb4"; match round { @@ -253,7 +242,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert!(op.parent_vtxindex == 0); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } } @@ -277,7 +266,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2003); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -306,7 +295,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2004); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -335,7 +324,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2005); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -364,7 +353,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2006); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -393,7 +382,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2007); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -471,7 +460,6 @@ fn bitcoind_integration(segwit_flag: bool) { }, _ => {} }; - return }); // Use block's hook for asserting expectations diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 076a5f61f3..6fe0018ced 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -19,9 +19,7 @@ use stacks::core::{ PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, -}; +use 
stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; @@ -50,7 +48,7 @@ fn test_exact_block_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let epoch_205_transition_height = 210; let transactions_to_broadcast = 25; @@ -256,10 +254,8 @@ fn test_exact_block_costs() { if dbget_txs.len() >= 2 { processed_txs_before_205 = true; } - } else { - if dbget_txs.len() >= 2 { - processed_txs_after_205 = true; - } + } else if dbget_txs.len() >= 2 { + processed_txs_after_205 = true; } assert_eq!(mined_anchor_cost, anchor_cost as u64); @@ -287,7 +283,7 @@ fn test_dynamic_db_method_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let contract_name = "test-contract"; let epoch_205_transition_height = 210; @@ -455,8 +451,7 @@ fn test_dynamic_db_method_costs() { .as_i64() .unwrap(); eprintln!( - "Burn height = {}, runtime_cost = {}, function_name = {}", - burn_height, runtime_cost, function_name + "Burn height = {burn_height}, runtime_cost = {runtime_cost}, function_name = {function_name}" ); if function_name == "db-get1" { @@ -569,21 +564,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 { @@ -831,7 +825,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::SmartContract(contract, ..) 
=> { - contract.name == ContractName::try_from("increment-contract").unwrap() + contract.name == ContractName::from("increment-contract") } _ => false, }, ); @@ -847,7 +841,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -863,7 +857,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -882,7 +876,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -897,7 +891,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -916,10 +910,7 @@ fn bigger_microblock_streams_in_2_05() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -993,7 +984,7 @@ fn bigger_microblock_streams_in_2_05() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -1035,9 +1026,8 @@ fn bigger_microblock_streams_in_2_05() { ) ) (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) + (crash-me \"large-contract-{ix}\")) + " ) ) }) @@ -1176,9 +1166,9 @@ fn bigger_microblock_streams_in_2_05() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..)
= parsed.payload { - if tsc.name.to_string().find("costs-2").is_some() { + if tsc.name.to_string().contains("costs-2") { in_205 = true; - } else if tsc.name.to_string().find("large").is_some() { + } else if tsc.name.to_string().contains("large") { num_big_microblock_txs += 1; if in_205 { total_big_txs_per_microblock_205 += 1; @@ -1209,7 +1199,7 @@ fn bigger_microblock_streams_in_2_05() { max_big_txs_per_microblock_20 = num_big_microblock_txs; } - eprintln!("Epoch size: {:?}", &total_execution_cost); + eprintln!("Epoch size: {total_execution_cost:?}"); if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { epoch_20_stream_cost = total_execution_cost; @@ -1232,21 +1222,13 @@ fn bigger_microblock_streams_in_2_05() { } eprintln!( - "max_big_txs_per_microblock_20: {}, total_big_txs_per_microblock_20: {}", - max_big_txs_per_microblock_20, total_big_txs_per_microblock_20 - ); - eprintln!( - "max_big_txs_per_microblock_205: {}, total_big_txs_per_microblock_205: {}", - max_big_txs_per_microblock_205, total_big_txs_per_microblock_205 - ); - eprintln!( - "confirmed stream execution in 2.0: {:?}", - &epoch_20_stream_cost + "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" ); eprintln!( - "confirmed stream execution in 2.05: {:?}", - &epoch_205_stream_cost + "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" ); + eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); + eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); // stuff happened assert!(epoch_20_stream_cost.runtime > 0); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 8f6c466318..1ad23db5e1 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1,6 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::{env, thread}; +use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use stacks::burnchains::bitcoin::address::{ @@ -46,7 +47,7 @@ use crate::tests::neon_integrations::*; use crate::tests::*; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; -const MINER_BURN_PUBLIC_KEY: &'static str = +const MINER_BURN_PUBLIC_KEY: &str = "03dc62fe0b8964d01fc9ca9a5eec0e22e557a12cc656919e648f04e0b26fea5faa"; fn advance_to_2_1( @@ -127,7 +128,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); mining_pubkey @@ -135,7 +136,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); btc_regtest_controller.bootstrap_chain(1); @@ -153,8 +154,8 @@ fn advance_to_2_1( .get_all_utxos(&Secp256k1PublicKey::from_hex(&mining_pubkey).unwrap()); eprintln!( - "UTXOs for {} (segwit={}): {:?}", - &mining_pubkey, conf.miner.segwit, &utxos + "UTXOs for {mining_pubkey} (segwit={}): {utxos:?}", + conf.miner.segwit ); assert_eq!(utxos.len(), 1); @@ -197,8 
+198,8 @@ fn advance_to_2_1( let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? @@ -210,13 +211,13 @@ fn advance_to_2_1( ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip @@ -224,7 +225,6 @@ fn advance_to_2_1( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, tip_info.burn_block_height, - res ); if tip_info.burn_block_height >= epoch_2_1 { @@ -251,7 +251,7 @@ fn advance_to_2_1( true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -264,13 +264,13 @@ fn advance_to_2_1( assert_eq!(account.nonce, 9); eprintln!("Begin Stacks 2.1"); - return ( + ( conf, btcd_controller, btc_regtest_controller, blocks_processed, channel, - ); + ) } #[test] @@ -285,7 +285,7 @@ fn transition_adds_burn_block_height() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1( @@ -409,11 +409,10 @@ fn transition_adds_burn_block_height() { for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` - eprintln!("{:#?}", &cev); + eprintln!("{cev:#?}"); let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] 
- .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -544,7 +543,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -554,7 +553,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let epoch_2_05 = 210; let epoch_2_1 = 215; @@ -655,7 +654,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op for a transfer-stx op that will get mined before the 2.1 epoch let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -687,8 +686,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -698,7 +697,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -728,21 +727,20 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height >= epoch_2_1 { @@ -778,7 +776,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); // costs-3 should NOT be initialized let e = get_contract_src( @@ -788,7 +786,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No costs-3: {}", &e); + eprintln!("No costs-3: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -812,7 +810,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. 
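Editor's note on the `raw_value` hunks above, which replace `String::from_utf8(bytes.to_vec())` with `str::from_utf8(&bytes)`: the new form validates the borrowed bytes in place instead of copying them first. A minimal illustration (the hex string is made up):

```rust
fn main() {
    // Event observers deliver Clarity values as 0x-prefixed hex strings.
    let raw_value = "0x0c000161";
    let bytes = raw_value.as_bytes();

    // Before: copies the tail into a fresh Vec<u8> just to re-validate it.
    let owned = String::from_utf8(bytes[2..].to_vec()).unwrap();

    // After: borrow the tail and validate with no allocation.
    let borrowed = std::str::from_utf8(&bytes[2..]).unwrap();

    assert_eq!(owned, borrowed);
}
```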
let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -840,8 +838,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -851,7 +849,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -885,7 +883,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -914,8 +912,8 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off our transfer op. let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -925,7 +923,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -952,7 +950,7 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off another transfer op that will fall outside the window let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -980,8 +978,8 @@ fn transition_fixes_bitcoin_rigidity() { }; let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 123, memo: vec![], // to be filled in @@ -991,7 +989,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -1070,11 +1068,7 @@ fn transition_adds_get_pox_addr_recipients() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1(initial_balances, None, Some(pox_constants.clone()), false); @@ -1094,11 +1088,10 @@ fn transition_adds_get_pox_addr_recipients() { .iter() .enumerate() { - let spender_sk = spender_sks[i].clone(); + let spender_sk = spender_sks[i]; let pox_addr_tuple = execute( &format!( - "{{ hashbytes: 0x{}, version: 0x{:02x} }}", - pox_pubkey_hash, + "{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x{:02x} }}", &(*addr_variant as u8) ), ClarityVersion::Clarity2, @@ -1126,9 +1119,8 @@ fn 
transition_adds_get_pox_addr_recipients() { } // stack some STX to segwit addressses - for i in 4..7 { - let spender_sk = spender_sks[i].clone(); - let pubk = Secp256k1PublicKey::from_private(&spender_sk); + for (i, spender_sk) in spender_sks.iter().enumerate().take(7).skip(4) { + let pubk = Secp256k1PublicKey::from_private(spender_sk); let version = i as u8; let bytes = match i { 4 => { @@ -1141,13 +1133,13 @@ fn transition_adds_get_pox_addr_recipients() { } }; let pox_addr_tuple = execute( - &format!("{{ hashbytes: 0x{}, version: 0x{:02x} }}", &bytes, &version), + &format!("{{ hashbytes: 0x{bytes}, version: 0x{version:02x} }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let tx = make_contract_call( - &spender_sk, + spender_sk, 0, 300, conf.burnchain.chain_id, @@ -1183,7 +1175,7 @@ fn transition_adds_get_pox_addr_recipients() { ) "; - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sks[0])); + let spender_addr_c32 = to_addr(&spender_sks[0]); let contract_tx = make_contract_publish( &spender_sks[0], 1, @@ -1197,17 +1189,15 @@ fn transition_adds_get_pox_addr_recipients() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // mine through two reward cycles // now let's mine until the next reward cycle starts ... - while sort_height - < (stack_sort_height as u64) + (((2 * pox_constants.reward_cycle_length) + 1) as u64) - { + while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let cc_tx = make_contract_call( @@ -1244,13 +1234,12 @@ fn transition_adds_get_pox_addr_recipients() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if parsed.txid() == cc_txid { // check events for this block - for (_i, event) in events.iter().enumerate() { + for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] 
- .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -1293,16 +1282,16 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(); // NOTE: there's an even number of payouts here, so this works - eprintln!("payout at {} = {}", burn_block_height, &payout); + eprintln!("payout at {burn_block_height} = {payout}"); if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase - eprintln!("{} in prepare phase", burn_block_height); + eprintln!("{burn_block_height} in prepare phase"); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); assert_eq!(pox_addr_tuples.len(), 1); } else { // in reward phase - eprintln!("{} in reward phase", burn_block_height); + eprintln!("{burn_block_height} in reward phase"); assert_eq!( payout, (conf.burnchain.burn_fee_cap / (OUTPUTS_PER_COMMIT as u64)) @@ -1313,10 +1302,11 @@ fn transition_adds_get_pox_addr_recipients() { for pox_addr_value in pox_addr_tuples.into_iter() { let pox_addr = - PoxAddress::try_from_pox_tuple(false, &pox_addr_value).expect( - &format!("FATAL: invalid PoX tuple {:?}", &pox_addr_value), - ); - eprintln!("at {}: {:?}", burn_block_height, &pox_addr); + PoxAddress::try_from_pox_tuple(false, &pox_addr_value) + .unwrap_or_else(|| { + panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") + }); + eprintln!("at {burn_block_height}: {pox_addr:?}"); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); } @@ -1328,14 +1318,14 @@ fn transition_adds_get_pox_addr_recipients() { } } - eprintln!("found pox addrs: {:?}", &found_pox_addrs); + eprintln!("found pox addrs: {found_pox_addrs:?}"); assert_eq!(found_pox_addrs.len(), 7); for addr in found_pox_addrs .into_iter() .map(|addr| Value::Tuple(addr.as_clarity_tuple().unwrap())) { - eprintln!("Contains: {:?}", &addr); + eprintln!("Contains: {addr:?}"); assert!(expected_pox_addrs.contains(&addr.to_string())); } } @@ -1388,7 +1378,7 @@ fn transition_adds_mining_from_segwit() { let utxos = btc_regtest_controller .get_all_utxos(&Secp256k1PublicKey::from_hex(MINER_BURN_PUBLIC_KEY).unwrap()); - assert!(utxos.len() > 0); + assert!(!utxos.is_empty()); // all UTXOs should be segwit for utxo in utxos.iter() { @@ -1398,7 +1388,7 @@ fn transition_adds_mining_from_segwit() { ); if let Some(BitcoinAddress::Segwit(SegwitBitcoinAddress::P2WPKH(..))) = &utxo_addr { } else { - panic!("UTXO address was {:?}", &utxo_addr); + panic!("UTXO address was {utxo_addr:?}"); } } @@ -1428,10 +1418,10 @@ fn transition_adds_mining_from_segwit() { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id).unwrap(); assert_eq!(commits.len(), 1); - let txid = commits[0].txid.clone(); + let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); - eprintln!("tx = {:?}", &tx); + eprintln!("tx = {tx:?}"); assert_eq!(tx.input[0].witness.len(), 2); let addr = BitcoinAddress::try_from_segwit( false, @@ -1462,11 +1452,7 @@ fn transition_removes_pox_sunset() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -1518,8 +1504,8 @@ fn transition_removes_pox_sunset() { 4 * prepare_phase_len / 5, 5, 15, - (sunset_start_rc * reward_cycle_len - 1).into(), - (sunset_end_rc * reward_cycle_len).into(), + sunset_start_rc * 
reward_cycle_len - 1, + sunset_end_rc * reward_cycle_len, (epoch_21 as u32) + 1, u32::MAX, u32::MAX, @@ -1573,11 +1559,8 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); let tx = make_contract_call( @@ -1591,7 +1574,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1605,29 +1588,26 @@ fn transition_removes_pox_sunset() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {}", sort_height); + eprintln!("Sort height pox-1: {sort_height}"); // advance to next reward cycle for _i in 0..(reward_cycle_len * 2 + 2) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info in pox-1 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + eprintln!("pox_info in pox-1 = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); // advance to 2.1 while sort_height <= epoch_21 + 1 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -1635,12 +1615,9 @@ fn transition_removes_pox_sunset() { // pox is still "active" despite unlock, because there's enough participation, and also even // though the v1 block height has passed, the pox-2 contract won't be managing reward sets // until the next reward cycle - eprintln!("pox_info in pox-2 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + eprintln!("pox_info in pox-2 = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); // re-stack let tx = make_contract_call( @@ -1654,7 +1631,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1671,30 +1648,24 @@ fn transition_removes_pox_sunset() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!( - "Sort height pox-1 to pox-2 with stack-stx to pox-2: {}", - sort_height - ); + eprintln!("Sort height pox-1 to pox-2 with stack-stx to pox-2: 
{sort_height}"); let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // get pox back online while sort_height <= epoch_21 + reward_cycle_len { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-2: {}", sort_height); + eprintln!("Sort height pox-2: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + eprintln!("pox_info = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); let burn_blocks = test_observer::get_burn_blocks(); let mut pox_out_opt = None; @@ -1719,9 +1690,9 @@ fn transition_removes_pox_sunset() { if (i as u64) < (sunset_start_rc * reward_cycle_len) { // before sunset - if recipients.len() >= 1 { + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { - pox_out_opt = if let Some(pox_out) = pox_out_opt.clone() { + pox_out_opt = if let Some(pox_out) = pox_out_opt { Some(std::cmp::max(amt, pox_out)) } else { Some(amt) @@ -1730,16 +1701,16 @@ fn transition_removes_pox_sunset() { } } else if (i as u64) >= (sunset_start_rc * reward_cycle_len) && (i as u64) + 1 < epoch_21 { // some sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { assert!(amt < pox_out); } } } else if (i as u64) + 1 >= epoch_21 { // no sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { // NOTE: odd number of reward cycles if !burnchain_config.is_in_prepare_phase((i + 2) as u64) { @@ -1862,8 +1833,8 @@ fn transition_empty_blocks() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
@@ -1875,21 +1846,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 || tip_info.burn_block_height == epoch_2_1 { @@ -1987,8 +1957,8 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: let mut stacks_tip_bhh = None; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.stacks_tip_height < max_stacks_tip { straggler = true; @@ -2057,15 +2027,9 @@ fn test_pox_reorgs_three_flaps() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2112,7 +2076,7 @@ fn test_pox_reorgs_three_flaps() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2124,9 +2088,9 @@ fn test_pox_reorgs_three_flaps() { let rpc_port = 41043 + 10 * i; let p2p_port = 41043 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); // conf.connection_options.inv_reward_cycles = 10; @@ -2134,16 +2098,14 @@ fn test_pox_reorgs_three_flaps() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2151,8 +2113,8 @@ fn test_pox_reorgs_three_flaps() { } // use short reward cycles - for i in 0..num_miners { - let mut 
burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2187,10 +2149,10 @@ fn test_pox_reorgs_three_flaps() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2215,8 +2177,8 @@ fn test_pox_reorgs_three_flaps() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2224,7 +2186,7 @@ fn test_pox_reorgs_three_flaps() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2238,23 +2200,19 @@ fn test_pox_reorgs_three_flaps() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2264,19 +2222,14 @@ fn test_pox_reorgs_three_flaps() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2288,7 +2241,7 @@ fn test_pox_reorgs_three_flaps() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2311,11 +2264,9 @@ fn test_pox_reorgs_three_flaps() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2325,8 +2276,8 @@ fn test_pox_reorgs_three_flaps() { 
sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2337,15 +2288,15 @@ fn test_pox_reorgs_three_flaps() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2353,8 +2304,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -2369,13 +2320,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2386,21 +2337,21 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2411,8 +2362,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2423,13 +2374,13 @@ fn test_pox_reorgs_three_flaps() { // miner 1 mines a prepare phase and confirms a hidden anchor block. 
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2440,8 +2391,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history continues to overtake miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2452,13 +2403,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2469,8 +2420,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. } @@ -2479,13 +2430,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2496,8 +2447,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map now becomes the heaviest. } @@ -2506,13 +2457,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2524,8 +2475,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); @@ -2538,24 +2489,21 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; max tip is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; max tip is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -2599,15 +2547,9 @@ fn test_pox_reorg_one_flap() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2654,7 +2596,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2666,24 +2608,22 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = 
confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2691,8 +2631,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2727,10 +2667,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2755,8 +2695,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2764,7 +2704,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2778,23 +2718,19 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2804,19 +2740,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2828,7 +2759,7 @@ fn test_pox_reorg_one_flap() { &[ 
Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2851,11 +2782,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2865,8 +2794,8 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2877,15 +2806,15 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2893,8 +2822,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -2907,13 +2836,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2924,21 +2853,21 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. 
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2950,8 +2879,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2966,24 +2895,21 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3025,15 +2951,9 @@ fn test_pox_reorg_flap_duel() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3080,7 +3000,7 @@ fn test_pox_reorg_flap_duel() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3094,24 +3014,23 @@ fn test_pox_reorg_flap_duel() { let rpc_port = 41083 + 10 * i; let p2p_port = 41083 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = 
confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); - confs[i].node.set_bootstrap_nodes( + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3119,8 +3038,8 @@ fn test_pox_reorg_flap_duel() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3155,10 +3074,10 @@ fn test_pox_reorg_flap_duel() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3183,8 +3102,8 @@ fn test_pox_reorg_flap_duel() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3192,7 +3111,7 @@ fn test_pox_reorg_flap_duel() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3206,23 +3125,19 @@ fn test_pox_reorg_flap_duel() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3232,19 +3147,14 @@ fn test_pox_reorg_flap_duel() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3256,7 +3166,7 @@ fn test_pox_reorg_flap_duel() { &[ 
Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3279,11 +3189,9 @@ fn test_pox_reorg_flap_duel() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3293,8 +3201,8 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3305,15 +3213,15 @@ fn test_pox_reorg_flap_duel() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3321,8 +3229,8 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -3344,13 +3252,13 @@ fn test_pox_reorg_flap_duel() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3362,21 +3270,21 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. 
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -3387,8 +3295,8 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3404,8 +3312,8 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3415,16 +3323,13 @@ fn test_pox_reorg_flap_duel() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3465,15 +3370,9 @@ fn test_pox_reorg_flap_reward_cycles() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3520,7 +3419,7 @@ fn test_pox_reorg_flap_reward_cycles() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3532,24 +3431,22 @@ fn test_pox_reorg_flap_reward_cycles() { let rpc_port = 41123 + 10 * i; let p2p_port = 41123 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = 
format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3557,8 +3454,8 @@ fn test_pox_reorg_flap_reward_cycles() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in confs.iter() { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3593,10 +3490,10 @@ fn test_pox_reorg_flap_reward_cycles() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3621,8 +3518,8 @@ fn test_pox_reorg_flap_reward_cycles() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3630,7 +3527,7 @@ fn test_pox_reorg_flap_reward_cycles() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3644,23 +3541,19 @@ fn test_pox_reorg_flap_reward_cycles() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3670,19 +3563,14 @@ fn test_pox_reorg_flap_reward_cycles() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3694,7 +3582,7 @@ fn test_pox_reorg_flap_reward_cycles() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3717,11 +3605,9 @@ fn test_pox_reorg_flap_reward_cycles() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3731,8 +3617,8 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3743,15 +3629,15 @@ fn test_pox_reorg_flap_reward_cycles() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3759,8 +3645,8 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in 
confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3780,13 +3666,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 1 is disabled for this reward cycle signal_mining_blocked(miner_status[1].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } } @@ -3794,8 +3680,8 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3803,21 +3689,21 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 0 is disabled for this reward cycle signal_mining_blocked(miner_status[0].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } signal_mining_ready(miner_status[0].clone()); info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3833,8 +3719,8 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3844,16 +3730,13 @@ fn test_pox_reorg_flap_reward_cycles() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. 
- eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3897,15 +3780,9 @@ fn test_pox_missing_five_anchor_blocks() { epochs[3].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3952,7 +3829,7 @@ fn test_pox_missing_five_anchor_blocks() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3964,24 +3841,22 @@ fn test_pox_missing_five_anchor_blocks() { let rpc_port = 41103 + 10 * i; let p2p_port = 41103 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3989,8 +3864,8 @@ fn test_pox_missing_five_anchor_blocks() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4025,10 +3900,10 @@ fn test_pox_missing_five_anchor_blocks() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - 
.set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4053,8 +3928,8 @@ fn test_pox_missing_five_anchor_blocks() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4062,7 +3937,7 @@ fn test_pox_missing_five_anchor_blocks() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4076,23 +3951,19 @@ fn test_pox_missing_five_anchor_blocks() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4102,19 +3973,14 @@ fn test_pox_missing_five_anchor_blocks() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4126,7 +3992,7 @@ fn test_pox_missing_five_anchor_blocks() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4149,11 +4015,9 @@ fn test_pox_missing_five_anchor_blocks() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4163,8 +4027,8 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4175,15 +4039,15 @@ fn test_pox_missing_five_anchor_blocks() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); 
submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4191,8 +4055,8 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4207,13 +4071,13 @@ fn test_pox_missing_five_anchor_blocks() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {} cycle {}\n\n", i, c); + eprintln!("\n\nBuild block {i} cycle {c}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -4223,8 +4087,8 @@ fn test_pox_missing_five_anchor_blocks() { signal_mining_ready(miner_status[1].clone()); info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4236,8 +4100,8 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -4245,16 +4109,13 @@ fn test_pox_missing_five_anchor_blocks() { // wait for all blocks to propagate. 
// miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4297,15 +4158,9 @@ fn test_sortition_divergence_pre_21() { epochs[3].start_height = 241; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -4352,7 +4207,7 @@ fn test_sortition_divergence_pre_21() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -4368,24 +4223,22 @@ fn test_sortition_divergence_pre_21() { let rpc_port = 41113 + 10 * i; let p2p_port = 41113 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -4393,8 +4246,8 @@ fn test_sortition_divergence_pre_21() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4429,10 +4282,10 @@ fn test_sortition_divergence_pre_21() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - 
.set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4457,8 +4310,8 @@ fn test_sortition_divergence_pre_21() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4466,7 +4319,7 @@ fn test_sortition_divergence_pre_21() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4480,23 +4333,19 @@ fn test_sortition_divergence_pre_21() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4506,19 +4355,14 @@ fn test_sortition_divergence_pre_21() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4530,7 +4374,7 @@ fn test_sortition_divergence_pre_21() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4553,11 +4397,9 @@ fn test_sortition_divergence_pre_21() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4567,8 +4409,8 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4579,15 +4421,15 @@ fn test_sortition_divergence_pre_21() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, 
&tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4595,8 +4437,8 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4611,13 +4453,13 @@ fn test_sortition_divergence_pre_21() { // mine a reward cycle in which the 2.05 rules choose a PoX anchor block, but the 2.1 rules do // not. for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len && i < reward_cycle_len - prepare_phase_len + 3 @@ -4645,27 +4487,27 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4673,13 +4515,13 @@ fn test_sortition_divergence_pre_21() { // run some cycles in 2.1 for _ in 0..2 { for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } } @@ -4690,24 +4532,21 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate. 
// miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4722,7 +4561,7 @@ fn trait_invocation_cross_epoch() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; let impl_contract = @@ -4897,8 +4736,7 @@ fn trait_invocation_cross_epoch() { "invoke-simple", "invocation-2", &[Value::Principal(PrincipalData::Contract( - QualifiedContractIdentifier::parse(&format!("{}.{}", &spender_addr_c32, "impl-simple")) - .unwrap(), + QualifiedContractIdentifier::parse(&format!("{spender_addr_c32}.impl-simple")).unwrap(), ))], ); let invoke_2_txid = submit_tx(&http_origin, &tx); @@ -4907,7 +4745,7 @@ fn trait_invocation_cross_epoch() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } - let interesting_txids = vec![ + let interesting_txids = [ invoke_txid.clone(), invoke_1_txid.clone(), invoke_2_txid.clone(), @@ -4988,21 +4826,13 @@ fn test_v1_unlock_height_with_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5089,7 +4919,7 @@ fn test_v1_unlock_height_with_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5110,7 +4940,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until epoch 2.1 @@ -5126,7 +4956,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5147,7 +4977,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can 
mine _at all_ is a success criterion @@ -5181,7 +5011,7 @@ fn test_v1_unlock_height_with_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5200,12 +5030,10 @@ fn test_v1_unlock_height_with_current_stackers() { assert_eq!(addr_tuple, pox_addr_tuple_1); } } - } else { - if !burnchain_config.is_in_prepare_phase(height) { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - assert_eq!(addr_tuple, pox_addr_tuple_2); - } + } else if !burnchain_config.is_in_prepare_phase(height) { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + assert_eq!(addr_tuple, pox_addr_tuple_2); } } } @@ -5251,21 +5079,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5355,7 +5175,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5376,7 +5196,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -5404,7 +5224,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5425,7 +5245,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5459,7 +5279,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5471,11 +5291,11 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { - if pox_addrs.len() > 0 { + if !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); for addr_tuple in pox_addrs { // can either pay to pox tuple 1, or burn @@ -5485,15 +5305,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { } } } - } else { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - // can either pay to pox tuple 2, or burn - assert_ne!(addr_tuple, pox_addr_tuple_1); - if addr_tuple == pox_addr_tuple_2 { - have_expected_payout = true; - } + } else if !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + // can either pay to pox tuple 2, or burn + assert_ne!(addr_tuple, pox_addr_tuple_1); + if addr_tuple == pox_addr_tuple_2 { + have_expected_payout = true; } } } diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 9bffca7c8a..dabd3ee9ed 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -58,7 +58,7 @@ fn disable_pox() { let epoch_2_2 = 255; // two blocks before next prepare phase. let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -92,31 +92,19 @@ fn disable_pox() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -210,14 +198,14 @@ fn disable_pox() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -239,7 +227,7 @@ fn disable_pox() { ], ); - info!("Submit 2.05 stacking tx to {:?}", 
&http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -267,7 +255,7 @@ fn disable_pox() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -288,7 +276,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); let tx = make_contract_call( @@ -307,7 +295,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -334,7 +322,7 @@ fn disable_pox() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..15 { @@ -361,7 +349,7 @@ fn disable_pox() { &[Value::UInt(5000)], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // finish the cycle after the 2.2 transition, @@ -397,9 +385,9 @@ fn disable_pox() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -407,7 +395,7 @@ fn disable_pox() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -419,38 +407,36 @@ fn disable_pox() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -518,14 +504,12 @@ fn disable_pox() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -544,8 +528,7 @@ fn disable_pox() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr - && parsed.auth.get_origin_nonce() == aborted_increase_nonce + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == aborted_increase_nonce { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, @@ -626,31 +609,19 @@ fn pox_2_unlock_all() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -749,14 +720,14 @@ fn pox_2_unlock_all() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -778,7 +749,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -807,7 +778,7 @@ fn pox_2_unlock_all() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -839,7 +810,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -859,7 +830,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -892,7 +863,7 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` // will be included in that bitcoin block. 
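The reward-cycle bookkeeping in `disable_pox` above swaps a `contains_key` check followed by `insert` for a single call to the map entry API, which does one lookup instead of two. The same idiom could also fold the per-address slot counting into one line; a minimal standalone sketch of the pattern (the `by_cycle` map, cycle number, and address key here are hypothetical):

    use std::collections::HashMap;

    fn main() {
        let mut by_cycle: HashMap<u64, HashMap<String, u64>> = HashMap::new();

        // One lookup to materialize the inner map; the diff uses
        // or_insert_with(HashMap::new), for which or_default() is shorthand.
        let cycle_counts = by_cycle.entry(21).or_default();

        // Insert-or-increment in a single lookup, replacing the
        // get_mut-then-insert branches seen in the surrounding hunks.
        *cycle_counts.entry("pox_addr_0".to_string()).or_insert(0) += 1;
    }
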
// this will build the last block before 2.2 activates - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let tx = make_contract_call( &spender_sk, @@ -913,19 +884,19 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` // will be included in that bitcoin block. // this block activates 2.2 - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this *burn block* is when the unlock occurs - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // and this will mine the first block whose parent is the unlock block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance as u64, @@ -943,7 +914,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance as u64, - spender_2_initial_balance - stacked - (1 * tx_fee), + spender_2_initial_balance - stacked - tx_fee, "Spender 2 should still be locked" ); assert_eq!( @@ -957,13 +928,13 @@ fn pox_2_unlock_all() { // and this will mice the bitcoin block containing the first block whose parent has >= unlock burn block // (which is the criterion for the unlock) - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance, @@ -978,7 +949,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -997,20 +968,20 @@ fn pox_2_unlock_all() { 1_000_000, ); - info!("Submit stack transfer tx to {:?}", &http_origin); + info!("Submit stack transfer tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // this wakes up the node to mine the transaction - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this block selects the previously mined block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); let spender_3_account = get_account(&http_origin, &spender_3_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = 
{:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_3_account.balance, 1_000_000, @@ -1038,7 +1009,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1080,9 +1051,9 @@ fn pox_2_unlock_all() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1090,7 +1061,7 @@ fn pox_2_unlock_all() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1102,38 +1073,36 @@ fn pox_2_unlock_all() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1183,18 +1152,16 @@ fn pox_2_unlock_all() { let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { Some(x) => x, None => { - info!("No reward cycle entry = {}", reward_cycle); + info!("No reward cycle entry = {reward_cycle}"); continue; } }; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of 
expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -1215,7 +1182,7 @@ fn pox_2_unlock_all() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_2_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1231,7 +1198,7 @@ fn pox_2_unlock_all() { assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); unlock_ht_22_tested = true; } - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1303,15 +1270,9 @@ fn test_pox_reorg_one_flap() { epochs.truncate(5); conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -1358,7 +1319,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -1370,25 +1331,23 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -1396,8 +1355,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in 
&confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -1432,10 +1391,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -1460,8 +1419,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -1469,7 +1428,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -1483,23 +1442,19 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -1509,19 +1464,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -1533,7 +1483,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1556,11 +1506,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -1570,8 +1518,8 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: 
{:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -1582,15 +1530,15 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -1598,8 +1546,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -1612,13 +1560,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1629,21 +1577,21 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1655,8 +1603,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -1671,23 +1619,20 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 2355f7521d..12ae11945d 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -52,7 +52,7 @@ fn trait_invocation_behavior() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let impl_contract_id = - QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + QualifiedContractIdentifier::new(contract_addr.into(), "impl-simple".into()); let mut spender_nonce = 0; let fee_amount = 10_000; @@ -227,9 +227,8 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &publish_invoke); info!( - "At height = {}, epoch-2.1 = {}", - get_chain_info(&conf).burn_block_height, - epoch_2_1 + "At height = {}, epoch-2.1 = {epoch_2_1}", + get_chain_info(&conf).burn_block_height ); // wait until just before epoch 2.1 loop { @@ -509,7 +508,7 @@ fn trait_invocation_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Total spender txs = {}", spender_nonce); + info!("Total spender txs = {spender_nonce}"); let blocks = test_observer::get_blocks(); @@ -526,7 +525,7 @@ fn trait_invocation_behavior() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr { + if tx_sender == spender_addr { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, // only interested in contract calls @@ -583,29 +582,27 @@ fn trait_invocation_behavior() { assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); } - for tx_nonce in [expected_good_23_3_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-1" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_3_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - for tx_nonce in [expected_good_23_4_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" 
- ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-2" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_4_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { assert_eq!( @@ -632,7 +629,7 @@ fn trait_invocation_behavior() { } for (key, value) in transaction_receipts.iter() { - eprintln!("{} => {} of {}", key, value.0, value.1); + eprintln!("{key} => {} of {}", value.0, value.1); } test_observer::clear(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 26ad007ca7..cfcc8d0d52 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -55,9 +55,9 @@ pub fn get_reward_set_entries_at_block( ) -> Result, Error> { state .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) - .and_then(|mut addrs| { + .map(|mut addrs| { addrs.sort_by_key(|k| k.reward_address.bytes()); - Ok(addrs) + addrs }) } @@ -86,7 +86,7 @@ fn fix_to_pox_contract() { let pox_3_activation_height = epoch_2_4; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -110,31 +110,19 @@ fn fix_to_pox_contract() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -232,14 +220,14 @@ fn fix_to_pox_contract() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -261,7 +249,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); 
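The `get_reward_set_entries_at_block` change above is the usual `and_then`-to-`map` cleanup: `and_then` exists for chaining a second fallible step, so a closure that always returns `Ok` is better written with `map`, which drops the re-wrapping. A sketch with simplified types (the function name and error type are hypothetical):

    fn sorted(res: Result<Vec<u32>, String>) -> Result<Vec<u32>, String> {
        // Before: res.and_then(|mut v| { v.sort(); Ok(v) })
        // After: map states that the transformation itself cannot fail.
        res.map(|mut v| {
            v.sort();
            v
        })
    }

    fn main() {
        assert_eq!(sorted(Ok(vec![3, 1, 2])), Ok(vec![1, 2, 3]));
    }
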
submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -290,7 +278,7 @@ fn fix_to_pox_contract() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -311,7 +299,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -341,13 +329,13 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.2 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -367,7 +355,7 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.3 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to 2 blocks before epoch 2.4 @@ -411,7 +399,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -431,7 +419,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -458,7 +446,7 @@ fn fix_to_pox_contract() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -492,9 +480,9 @@ fn fix_to_pox_contract() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -502,7 +490,7 @@ fn fix_to_pox_contract() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -514,38 +502,36 @@ fn fix_to_pox_contract() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -624,14 +610,12 @@ fn fix_to_pox_contract() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -651,7 +635,7 @@ fn fix_to_pox_contract() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) { @@ -738,21 +722,13 @@ fn verify_auto_unlock_behavior() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -765,11 +741,7 @@ fn verify_auto_unlock_behavior() { "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let pox_pubkey_3_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -879,14 +851,14 @@ fn verify_auto_unlock_behavior() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -908,7 +880,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -937,7 +909,7 @@ fn verify_auto_unlock_behavior() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -958,7 +930,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -979,7 +951,7 @@ fn verify_auto_unlock_behavior() { // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1044,7 +1016,7 @@ fn verify_auto_unlock_behavior() { ], ); 
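The epoch-transition loops above also normalize the boundary test from `tip_info.burn_block_height >= epoch_2_3 + 1` to `tip_info.burn_block_height > epoch_2_3`. For unsigned integers the two are equivalent, and the second form cannot overflow if the epoch constant were ever `u64::MAX`; a sketch with hypothetical heights:

    fn main() {
        let (tip_height, epoch_2_3) = (253u64, 252u64);
        assert_eq!(tip_height >= epoch_2_3 + 1, tip_height > epoch_2_3);
    }
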
- info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -1064,7 +1036,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -1113,7 +1085,7 @@ fn verify_auto_unlock_behavior() { .unwrap(); assert_eq!(reward_set_entries.len(), 2); - info!("reward set entries: {:?}", reward_set_entries); + info!("reward set entries: {reward_set_entries:?}"); assert_eq!( reward_set_entries[0].reward_address.bytes(), pox_pubkey_2_stx_addr.bytes.0.to_vec() @@ -1141,7 +1113,7 @@ fn verify_auto_unlock_behavior() { &[Value::UInt(first_stacked_incr.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -1213,9 +1185,9 @@ fn verify_auto_unlock_behavior() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1223,7 +1195,7 @@ fn verify_auto_unlock_behavior() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1235,37 +1207,35 @@ fn verify_auto_unlock_behavior() { .expect_list() .unwrap(); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1340,14 +1310,12 @@ fn verify_auto_unlock_behavior() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX 
addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 6af1bee626..34083fb22a 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -23,7 +23,7 @@ use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::InitialBalance; use crate::tests::bitcoin_regtest::BitcoinCoreController; -use crate::tests::nakamoto_integrations::wait_for; +use crate::tests::nakamoto_integrations::{next_block_and, wait_for}; use crate::tests::neon_integrations::{ get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx, test_observer, wait_for_runloop, @@ -79,8 +79,8 @@ fn microblocks_disabled() { conf.node.wait_time_for_blocks = 2_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); test_observer::register_any(&mut conf); @@ -111,8 +111,8 @@ fn microblocks_disabled() { 4 * prepare_phase_len / 5, 5, 15, - u64::max_value() - 2, - u64::max_value() - 1, + u64::MAX - 2, + u64::MAX - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, @@ -162,6 +162,9 @@ fn microblocks_disabled() { // push us to block 205 next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Ensure we start off with 0 microblocks + assert!(test_observer::get_microblocks().is_empty()); + let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 0, @@ -172,7 +175,11 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - // wait until just before epoch 2.5 + // Wait for a microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 1)) + .expect("Failed to wait for microblocks to be assembled"); + + // mine Bitcoin blocks up until just before epoch 2.5 wait_for(120, || { let tip_info = get_chain_info(&conf); if tip_info.burn_block_height >= epoch_2_5 - 2 { @@ -183,6 +190,14 @@ fn microblocks_disabled() { }) .expect("Failed to wait until just before epoch 2.5"); + // Verify that the microblock was processed + let account = get_account(&http_origin, &spender_1_addr); + assert_eq!( + u64::try_from(account.balance).unwrap(), + spender_1_bal - 1_000 + ); + assert_eq!(account.nonce, 1); + let old_tip_info = get_chain_info(&conf); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -194,13 +209,8 @@ fn microblocks_disabled() { .expect("Failed to process block"); info!("Test passed processing 2.5"); - let account = 
get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); + // Submit another microblock only transaction let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 1, @@ -211,19 +221,12 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a microblock to be assembled, but expect none to be assembled + wait_for(30, || Ok(test_observer::get_microblocks().len() > 1)) + .expect_err("Microblocks should not have been assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // second transaction should not have been processed! let account = get_account(&http_origin, &spender_1_addr); @@ -233,31 +236,18 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - let microblocks_assembled = test_observer::get_microblocks().len(); - info!("Microblocks assembled: {microblocks_assembled}",); - assert!( - microblocks_assembled > 0, - "There should be at least 1 microblock assembled" - ); - let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce; // Now, lets tell the miner to try to mine microblocks, but don't try to confirm them! + info!("Setting STACKS_TEST_FORCE_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a second microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 2)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -270,44 +260,35 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - // but we should have assembled and announced at least 1 more block to the observer - assert!(test_observer::get_microblocks().len() > microblocks_assembled); info!( "Microblocks assembled: {}", test_observer::get_microblocks().len() ); // and our miner should have gotten some blocks accepted - assert!( - miner_nonce_after_microblock_assembly > miner_nonce_before_microblock_assembly, + assert_eq!( + miner_nonce_after_microblock_assembly, miner_nonce_before_microblock_assembly + 1, "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}" ); // Now, tell the miner to try to confirm microblocks as well. 
// This should test that the block gets rejected by append block + info!("Setting STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a third microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 3)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; - // and our miner should have gotten at most one more block accepted - // (because they may have had 1 block confirmation in the bitcoin mempool which didn't confirm a microblock - // before we flipped the flag) - assert!( - miner_nonce_after_microblock_confirmation <= miner_nonce_after_microblock_assembly + 1, + // our miner should not have gotten any more blocks accepted + assert_eq!( + miner_nonce_after_microblock_confirmation, + miner_nonce_after_microblock_assembly + 1, "Mined after started microblock confirmation: {miner_nonce_after_microblock_confirmation}", ); diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 236d76b000..574b18e964 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -1,3 +1,4 @@ +use std::cmp::Ordering; use std::collections::HashMap; use std::fmt::Write; use std::sync::Mutex; @@ -43,7 +44,7 @@ use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; -const OTHER_CONTRACT: &'static str = " +const OTHER_CONTRACT: &str = " (define-data-var x uint u0) (define-public (f1) (ok (var-get x))) @@ -51,14 +52,14 @@ const OTHER_CONTRACT: &'static str = " (ok (var-set x val))) "; -const CALL_READ_CONTRACT: &'static str = " +const CALL_READ_CONTRACT: &str = " (define-public (public-no-write) (ok (contract-call? 
.other f2 u5))) "; -const GET_INFO_CONTRACT: &'static str = " +const GET_INFO_CONTRACT: &str = " (define-map block-data { height: uint } { stacks-hash: (buff 32), @@ -143,7 +144,7 @@ const GET_INFO_CONTRACT: &'static str = " (fn-2 (uint) (response uint uint)))) "; -const IMPL_TRAIT_CONTRACT: &'static str = " +const IMPL_TRAIT_CONTRACT: &str = " ;; explicit trait compliance for trait 1 (impl-trait .get-info.trait-1) (define-private (test-height) burn-block-height) @@ -193,7 +194,7 @@ fn integration_test_get_info() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } run_loop @@ -279,10 +280,10 @@ fn integration_test_get_info() { let old_tip = StacksBlockId::new(&consensus_hash, &header_hash); use std::fs; use std::io::Write; - if fs::metadata(&tmppath).is_ok() { - fs::remove_file(&tmppath).unwrap(); + if fs::metadata(tmppath).is_ok() { + fs::remove_file(tmppath).unwrap(); } - let mut f = fs::File::create(&tmppath).unwrap(); + let mut f = fs::File::create(tmppath).unwrap(); f.write_all(&old_tip.serialize_to_vec()).unwrap(); } else if round == 2 { // block-height = 3 @@ -311,7 +312,7 @@ fn integration_test_get_info() { // block-height > 3 let tx = make_contract_call( &principal_sk, - (round - 3).into(), + round - 3, 10, CHAIN_ID_TESTNET, &to_addr(&contract_sk), @@ -337,7 +338,7 @@ fn integration_test_get_info() { if round >= 1 { let tx_xfer = make_stacks_transfer( &spender_sk, - (round - 1).into(), + round - 1, 10, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -356,16 +357,14 @@ fn integration_test_get_info() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state(|round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_addr = to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()); let contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "get-info")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.get-info")).unwrap(); let impl_trait_contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "impl-trait-contract")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.impl-trait-contract")).unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() }; @@ -374,7 +373,7 @@ fn integration_test_get_info() { match round { 1 => { // - Chain length should be 2. 
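The `reward_cycle_pox_addrs` hunk earlier swaps a `contains_key`-then-`insert` pair for the `HashMap` entry API, which performs the lookup once. A standalone sketch of the pattern (map types are illustrative; the final stanza is a further compression that the diff itself does not apply to the inner counting):

```rust
use std::collections::HashMap;

fn main() {
    let mut reward_cycle_pox_addrs: HashMap<u64, HashMap<String, u64>> = HashMap::new();
    let reward_cycle = 21u64;

    // Old shape: check, then insert -- two lookups.
    if !reward_cycle_pox_addrs.contains_key(&reward_cycle) {
        reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new());
    }

    // New shape from the diff: one lookup through the entry API.
    reward_cycle_pox_addrs
        .entry(reward_cycle)
        .or_insert_with(HashMap::new);

    // Side note (not in the diff): the inner get_mut/insert slot counting
    // could be collapsed the same way.
    *reward_cycle_pox_addrs
        .entry(reward_cycle)
        .or_insert_with(HashMap::new)
        .entry("pox_addr_0".to_string())
        .or_insert(0) += 1;

    assert_eq!(reward_cycle_pox_addrs[&reward_cycle]["pox_addr_0"], 1);
}
```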
- let blocks = StacksChainState::list_blocks(&chain_state.db()).unwrap(); + let blocks = StacksChainState::list_blocks(chain_state.db()).unwrap(); assert!(chain_tip.metadata.stacks_block_height == 2); // Block #1 should have 5 txs @@ -382,14 +381,14 @@ fn integration_test_get_info() { let parent = chain_tip.block.header.parent_block; let bhh = &chain_tip.metadata.index_block_hash(); - eprintln!("Current Block: {} Parent Block: {}", bhh, parent); + eprintln!("Current Block: {bhh} Parent Block: {parent}"); let parent_val = Value::buff_from(parent.as_bytes().to_vec()).unwrap(); // find header metadata let mut headers = vec![]; for block in blocks.iter() { let header = StacksChainState::get_anchored_block_header_info(chain_state.db(), &block.0, &block.1).unwrap().unwrap(); - eprintln!("{}/{}: {:?}", &block.0, &block.1, &header); + eprintln!("{}/{}: {header:?}", &block.0, &block.1); headers.push(header); } @@ -500,13 +499,12 @@ fn integration_test_get_info() { burn_dbconn, bhh, &contract_identifier, "(exotic-data-checks u4)")); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/map_entry/{}/{}/{}", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -514,14 +512,14 @@ fn integration_test_get_info() { let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); assert_eq!(result_data, expected_data); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(100))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -532,19 +530,18 @@ fn integration_test_get_info() { let sender_addr = to_addr(&StacksPrivateKey::from_hex(SK_3).unwrap()); // now, let's use a query string to get data without a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=0", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=0"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_none()); + assert!(!res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -553,19 +550,18 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // now, let's use a query string to get data _with_ a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=1", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=1"); let key: Value = TupleData::from_data(vec![("height".into(), 
Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::>().unwrap(); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -574,9 +570,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // account with a nonce entry + a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &sender_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{sender_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 99860); assert_eq!(res.nonce, 4); @@ -584,9 +579,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a nonce entry but not a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 960); assert_eq!(res.nonce, 4); @@ -594,9 +588,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a balance entry but not a nonce entry - let path = format!("{}/v2/accounts/{}", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -604,27 +597,24 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with neither! 
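Several assertions above move from `res.get("proof").is_some()` to `res.contains_key("proof")`, the clippy-preferred form when only key existence matters. A tiny sketch, assuming the response deserializes to a string map as the stripped generic parameter suggests:

```rust
use std::collections::HashMap;

fn main() {
    let mut res: HashMap<String, String> = HashMap::new();
    res.insert("proof".into(), "0x00".into());

    // Equivalent checks; contains_key states the intent without building an Option.
    assert!(res.get("proof").is_some());
    assert!(res.contains_key("proof"));

    // Negated form, as used for the ?proof=0 responses:
    res.remove("proof");
    assert!(!res.contains_key("proof"));
}
```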
- let path = format!("{}/v2/accounts/{}.get-info", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}.get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 0); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_some()); assert!(res.balance_proof.is_some()); - let path = format!("{}/v2/accounts/{}?proof=0", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_none()); assert!(res.balance_proof.is_none()); - let path = format!("{}/v2/accounts/{}?proof=1", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=1"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -632,15 +622,15 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // let's try getting the transfer cost - let path = format!("{}/v2/fees/transfer", &http_origin); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/fees/transfer"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert!(res > 0); // let's get a contract ABI - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); let contract_analysis = mem_type_check(GET_INFO_CONTRACT, ClarityVersion::Clarity2, StacksEpochId::Epoch21).unwrap().1; @@ -652,14 +642,14 @@ fn integration_test_get_info() { // a missing one? - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // let's get a contract SRC - let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -667,8 +657,8 @@ fn integration_test_get_info() { assert!(res.marf_proof.is_some()); - let path = format!("{}/v2/contracts/source/{}/{}?proof=0", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -677,14 +667,14 @@ fn integration_test_get_info() { // a missing one? 
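For reference, the account assertions above pin down the `proof` query parameter's behavior: with `?proof=0` the MARF proofs are omitted, while `?proof=1` (or no parameter) includes them alongside the balance and nonce. A schematic of exactly what the test asserts (the struct is a placeholder; the real response type's generic parameter was lost in extraction):

```rust
// Placeholder response shape inferred from the assertions in the test.
struct AccountEntry {
    nonce_proof: Option<String>,
    balance_proof: Option<String>,
}

fn main() {
    // GET /v2/accounts/{addr}?proof=0 -> proofs elided
    let no_proof = AccountEntry { nonce_proof: None, balance_proof: None };
    // GET /v2/accounts/{addr}?proof=1, or no parameter -> proofs included
    let with_proof = AccountEntry {
        nonce_proof: Some("0x..".into()),
        balance_proof: Some("0x..".into()),
    };

    assert!(no_proof.nonce_proof.is_none() && no_proof.balance_proof.is_none());
    assert!(with_proof.nonce_proof.is_some() && with_proof.balance_proof.is_some());
}
```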
- let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // how about a read-only function call! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -705,8 +695,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does not modify anything - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-no-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-no-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -732,8 +722,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does modify something and should fail - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -750,9 +740,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("NotReadOnly")); // let's try a call with a url-encoded string. - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", - "get-exotic-data-info%3F"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info%3F"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -774,8 +763,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // let's have a runtime error! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -793,8 +782,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("UnwrapFailure")); // let's have a runtime error! 
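One of the call-read paths above exercises a URL-encoded function name, `get-exotic-data-info%3F`; `%3F` is a percent-encoded `?`, which would otherwise begin the query string. A minimal sketch of building such a path (values are placeholders; a real client would typically use a URL-encoding crate rather than a hard-coded replace):

```rust
fn main() {
    let http_origin = "http://127.0.0.1:20443"; // placeholder
    let contract_addr = "ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC"; // placeholder
    // '?' must be percent-encoded inside a path segment.
    let func = "get-exotic-data-info?".replace('?', "%3F");
    let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/{func}");
    assert!(path.ends_with("/get-info/get-exotic-data-info%3F"));
}
```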
- let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "update-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/update-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -814,13 +803,13 @@ fn integration_test_get_info() { // let's submit a valid transaction! let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (valid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (valid)"); // tx_xfer is 180 bytes long let tx_xfer = make_stacks_transfer( &spender_sk, - round.into(), + round, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -846,17 +835,17 @@ fn integration_test_get_info() { .send() .unwrap().json::().unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // let's submit an invalid transaction! - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (invalid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (invalid)"); // tx_xfer_invalid is 180 bytes long // bad nonce - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -869,39 +858,39 @@ fn integration_test_get_info() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("txid").unwrap().as_str().unwrap(), format!("{}", tx_xfer_invalid_tx.txid())); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // testing /v2/trait// // trait does not exist - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "get-info", &contract_addr, "get-info", "dummy-trait"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/get-info/{contract_addr}/get-info/dummy-trait"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // explicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // No trait found - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-4"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-4"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // implicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, 
&contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-2"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-2"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // invalid trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-3"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-3"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(!res.is_implemented); // test query parameters for v2/trait endpoint @@ -911,33 +900,33 @@ fn integration_test_get_info() { let tmppath = "/tmp/integration_test_get_info-old-tip"; use std::fs; use std::io::Read; - let mut f = fs::File::open(&tmppath).unwrap(); + let mut f = fs::File::open(tmppath).unwrap(); let mut buf = vec![]; f.read_to_end(&mut buf).unwrap(); let old_tip = StacksBlockId::consensus_deserialize(&mut &buf[..]).unwrap(); - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip={}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1", &old_tip); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip={old_tip}"); let res = client.get(&path).send().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); // evaluate check for explicit compliance where tip is the chain tip of the first block (contract DNE at that block), but tip is "latest" - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=latest", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip=latest"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // perform some tests of the fee rate interface - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = - TransactionPayload::TokenTransfer(contract_addr.clone().into(), 10_000_000, TokenTransferMemo([0; 34])); + TransactionPayload::TokenTransfer(contract_addr.into(), 10_000_000, TokenTransferMemo([0; 34])); let payload_data = tx_payload.serialize_to_vec(); let payload_hex = format!("0x{}", to_hex(&payload_data)); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -948,7 +937,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be 0 -- their cost is just in their length @@ -975,11 +964,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be 
length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -988,7 +977,7 @@ fn integration_test_get_info() { let payload_data = tx_payload.serialize_to_vec(); let payload_hex = to_hex(&payload_data); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -999,7 +988,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1026,11 +1015,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -1041,7 +1030,7 @@ fn integration_test_get_info() { let estimated_len = 1550; let body = json!({ "transaction_payload": payload_hex.clone(), "estimated_len": estimated_len }); - info!("POST body\n {}", body); + info!("POST body\n {body}"); let res = client.post(&path) .json(&body) .send() .unwrap() .json::() .expect("Failed to parse result into JSON"); - info!("{}", res); + info!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1094,7 +1083,7 @@ fn integration_test_get_info() { run_loop.start(num_rounds).unwrap(); } -const FAUCET_CONTRACT: &'static str = " +const FAUCET_CONTRACT: &str = " (define-public (spout) (let ((recipient tx-sender)) (print (as-contract (stx-transfer? 
u1 .faucet recipient))))) @@ -1111,7 +1100,7 @@ fn contract_stx_transfer() { conf.burnchain.commit_anchor_block_within = 5000; conf.add_initial_balance(addr_3.to_string(), 100000); conf.add_initial_balance( - to_addr(&StacksPrivateKey::from_hex(&SK_2).unwrap()).to_string(), + to_addr(&StacksPrivateKey::from_hex(SK_2).unwrap()).to_string(), 1000, ); conf.add_initial_balance(to_addr(&contract_sk).to_string(), 1000); @@ -1133,9 +1122,8 @@ fn contract_stx_transfer() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1226,7 +1214,7 @@ fn contract_stx_transfer() { .submit_raw( &mut chainstate_copy, &sortdb, - &consensus_hash, + consensus_hash, &header_hash, tx, &ExecutionCost::max_value(), @@ -1287,30 +1275,27 @@ fn contract_stx_transfer() { .unwrap_err() { MemPoolRejection::ConflictingNonceInMempool => (), - e => panic!("{:?}", e), + e => panic!("{e:?}"), }; } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); match round { 1 => { - assert!(chain_tip.metadata.stacks_block_height == 2); + assert_eq!(chain_tip.metadata.stacks_block_height, 2); // Block #1 should have 2 txs -- coinbase + transfer assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1353,19 +1338,19 @@ fn contract_stx_transfer() { ); } 2 => { - assert!(chain_tip.metadata.stacks_block_height == 3); + assert_eq!(chain_tip.metadata.stacks_block_height, 3); // Block #2 should have 2 txs -- coinbase + publish assert_eq!(chain_tip.block.txs.len(), 2); } 3 => { - assert!(chain_tip.metadata.stacks_block_height == 4); + assert_eq!(chain_tip.metadata.stacks_block_height, 4); // Block #3 should have 2 txs -- coinbase + contract-call, // the second publish _should have been rejected_ assert_eq!(chain_tip.block.txs.len(), 2); // check that 1 stx was transfered to SK_2 via the contract-call let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1408,7 +1393,7 @@ fn contract_stx_transfer() { ); } 4 => { - assert!(chain_tip.metadata.stacks_block_height == 5); + assert_eq!(chain_tip.metadata.stacks_block_height, 5); assert_eq!( chain_tip.block.txs.len() as u64, MAXIMUM_MEMPOOL_TX_CHAINING + 1, @@ -1416,7 +1401,7 @@ fn contract_stx_transfer() { ); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1491,9 +1476,8 @@ fn mine_transactions_out_of_order() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1578,16 +1562,13 @@ fn 
mine_transactions_out_of_order() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1610,7 +1591,7 @@ fn mine_transactions_out_of_order() { // check that 1000 stx _was_ transfered to the contract principal let curr_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); assert_eq!( @@ -1698,15 +1679,14 @@ fn mine_contract_twice() { run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); if round == 2 { let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that the contract published! @@ -1761,9 +1741,8 @@ fn bad_contract_tx_rollback() { let addr_2 = to_addr(&sk_2); let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1872,16 +1851,13 @@ fn bad_contract_tx_rollback() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1892,7 +1868,7 @@ fn bad_contract_tx_rollback() { assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1967,10 +1943,8 @@ fn make_expensive_contract(inner_loop: &str, other_decl: &str) -> String { for i in 0..10 { contract.push('\n'); contract.push_str(&format!( - "(define-constant list-{} (concat list-{} list-{}))", + "(define-constant list-{} (concat list-{i} list-{i}))", i + 1, - i, - i )); } @@ -2083,7 +2057,7 @@ fn block_limit_runtime_test() { let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; let spender_sks = make_keys(seed, 500); for sk in spender_sks.iter() { - conf.add_initial_balance(to_addr(&sk).to_string(), 1000); + conf.add_initial_balance(to_addr(sk).to_string(), 1000); } let num_rounds = 6; @@ -2097,9 +2071,8 @@ fn block_limit_runtime_test() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let (consensus_hash, block_hash) = ( @@ -2107,45 +2080,15 @@ fn block_limit_runtime_test() { &tenure.parent_block.metadata.anchored_header.block_hash(), ); - if round == 1 { - let publish_tx = make_contract_publish( 
- &contract_sk, - 0, - 10, - CHAIN_ID_TESTNET, - "hello-contract", - EXPENSIVE_CONTRACT.as_str(), - ); - tenure - .mem_pool - .submit_raw( - &mut chainstate_copy, - &sortdb, - consensus_hash, - block_hash, - publish_tx, - &ExecutionCost::max_value(), - &StacksEpochId::Epoch21, - ) - .unwrap(); - } else if round > 1 { - eprintln!("Begin Round: {}", round); - let to_submit = 2 * (round - 1); - - let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; - let spender_sks = make_keys(seed, 500); - - for i in 0..to_submit { - let sk = &spender_sks[(i + round * round) as usize]; - let tx = make_contract_call( - sk, + match round.cmp(&1) { + Ordering::Equal => { + let publish_tx = make_contract_publish( + &contract_sk, 0, 10, CHAIN_ID_TESTNET, - &to_addr(&contract_sk), "hello-contract", - "do-it", - &[], + EXPENSIVE_CONTRACT.as_str(), ); tenure .mem_pool @@ -2154,24 +2097,55 @@ fn block_limit_runtime_test() { &sortdb, consensus_hash, block_hash, - tx, + publish_tx, &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ) .unwrap(); } - } - - return; + Ordering::Greater => { + eprintln!("Begin Round: {round}"); + let to_submit = 2 * (round - 1); + + let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; + let spender_sks = make_keys(seed, 500); + + for i in 0..to_submit { + let sk = &spender_sks[(i + round * round) as usize]; + let tx = make_contract_call( + sk, + 0, + 10, + CHAIN_ID_TESTNET, + &to_addr(&contract_sk), + "hello-contract", + "do-it", + &[], + ); + tenure + .mem_pool + .submit_raw( + &mut chainstate_copy, + &sortdb, + consensus_hash, + block_hash, + tx, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch21, + ) + .unwrap(); + } + } + Ordering::Less => {} + }; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); @@ -2180,7 +2154,7 @@ fn block_limit_runtime_test() { // Block #1 should have 3 txs -- coinbase + 2 contract calls... assert_eq!(block.block.txs.len(), 3); } - 3 | 4 | 5 => { + 3..=5 => { // Block >= 2 should have 4 txs -- coinbase + 3 contract calls // because the _subsequent_ transactions should never have been // included. @@ -2215,7 +2189,7 @@ fn mempool_errors() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } let mut run_loop = RunLoop::new(conf); @@ -2254,22 +2228,19 @@ fn mempool_errors() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, _block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() }; let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let spender_addr = to_addr(&spender_sk); @@ -2277,7 +2248,7 @@ fn mempool_errors() { if round == 1 { // let's submit an invalid transaction! 
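The `block_limit_runtime_test` hunk above rewrites an `if round == 1 { .. } else if round > 1 { .. }` chain as a `match` on `round.cmp(&1)`, making all three orderings explicit (the shape clippy's `comparison_chain` lint suggests). The same hunk also folds the `3 | 4 | 5` arm into the range pattern `3..=5`. The control flow, reduced to a standalone sketch:

```rust
use std::cmp::Ordering;

fn handle_round(round: u64) {
    match round.cmp(&1) {
        // round == 1: publish the contract, once.
        Ordering::Equal => println!("publish hello-contract"),
        // round > 1: submit 2 * (round - 1) contract calls.
        Ordering::Greater => println!("submit {} calls", 2 * (round - 1)),
        // round == 0: nothing to do, but the case is now explicit.
        Ordering::Less => {}
    }
}

fn main() {
    for round in 0..4 {
        handle_round(round);
    }
}
```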
- eprintln!("Test: POST {} (invalid)", path); + eprintln!("Test: POST {path} (invalid)"); let tx_xfer_invalid = make_stacks_transfer( &spender_sk, 30, // bad nonce -- too much chaining @@ -2298,7 +2269,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2312,7 +2283,7 @@ fn mempool_errors() { "TooMuchChaining" ); let data = res.get("reason_data").unwrap(); - assert_eq!(data.get("is_origin").unwrap().as_bool().unwrap(), true); + assert!(data.get("is_origin").unwrap().as_bool().unwrap()); assert_eq!( data.get("principal").unwrap().as_str().unwrap(), &spender_addr.to_string() @@ -2340,7 +2311,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2374,7 +2345,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2419,7 +2390,7 @@ fn mempool_errors() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index b701e70a15..58a526ba30 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -31,13 +31,13 @@ use super::{ use crate::helium::RunLoop; use crate::Keychain; -const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) (define-public (bar (x uint)) (ok x))"; -const TRAIT_CONTRACT: &'static str = "(define-trait tr ((value () (response uint uint))))"; -const USE_TRAIT_CONTRACT: &'static str = "(use-trait tr-trait .trait-contract.tr) +const TRAIT_CONTRACT: &str = "(define-trait tr ((value () (response uint uint))))"; +const USE_TRAIT_CONTRACT: &str = "(use-trait tr-trait .trait-contract.tr) (define-public (baz (abc )) (ok (contract-of abc)))"; -const IMPLEMENT_TRAIT_CONTRACT: &'static str = "(define-public (value) (ok u1))"; -const BAD_TRAIT_CONTRACT: &'static str = "(define-public (foo-bar) (ok u1))"; +const IMPLEMENT_TRAIT_CONTRACT: &str = "(define-public (value) (ok u1))"; +const BAD_TRAIT_CONTRACT: &str = "(define-public (foo-bar) (ok u1))"; pub fn make_bad_stacks_transfer( sender: &StacksPrivateKey, @@ -318,17 +318,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!( - if let MemPoolRejection::FailedToValidate(ChainstateError::NetError( - NetError::VerifyingError(_), - )) = e - { - true - } else { - false - } - ); + eprintln!("Err: {e:?}"); + assert!(matches!( + e, + MemPoolRejection::FailedToValidate(ChainstateError::NetError( + NetError::VerifyingError(_) + )) + )); // mismatched network on contract-call! let bad_addr = StacksAddress::from_public_keys( @@ -337,8 +333,7 @@ fn mempool_setup_chainstate() { 1, &vec![StacksPublicKey::from_private(&other_sk)], ) - .unwrap() - .into(); + .unwrap(); let tx_bytes = make_contract_call( &contract_sk, @@ -362,11 +357,7 @@ fn mempool_setup_chainstate() { ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // mismatched network on transfer! 
let bad_addr = StacksAddress::from_public_keys( @@ -391,11 +382,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // bad fees let tx_bytes = @@ -411,12 +398,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _))); // bad nonce let tx_bytes = @@ -432,12 +415,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadNonces(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadNonces(_))); // not enough funds let tx_bytes = make_stacks_transfer( @@ -459,15 +438,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); // sender == recipient - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let tx_bytes = make_stacks_transfer( &contract_sk, 5, @@ -487,7 +462,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { r == contract_princ } else { @@ -517,15 +492,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // tx version must be testnet - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let payload = TransactionPayload::TokenTransfer( contract_princ.clone(), 1000, @@ -551,12 +522,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadTransactionVersion = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadTransactionVersion)); // send amount must be positive let tx_bytes = @@ -572,12 +539,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::TransferAmountMustBePositive)); // not enough funds let tx_bytes = make_stacks_transfer( @@ -599,12 +562,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); let tx_bytes = make_stacks_transfer( &contract_sk, @@ -625,12 +584,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); 
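Most of the remaining changes in this mempool test replace `if let ... { true } else { false }` blocks with the `matches!` macro. A self-contained sketch with a stand-in error enum (the real `MemPoolRejection` variants carry richer payloads):

```rust
#[allow(dead_code)]
#[derive(Debug)]
enum MemPoolRejection {
    BadAddressVersionByte,
    FeeTooLow(u64, u64),
    NotEnoughFunds(u128, u128),
}

fn main() {
    let e = MemPoolRejection::FeeTooLow(0, 180);

    // Old shape: an if-let expression evaluated down to a bool.
    assert!(if let MemPoolRejection::FeeTooLow(0, _) = e {
        true
    } else {
        false
    });

    // New shape from the diff: matches! accepts the same pattern syntax,
    // including literals and wildcards.
    assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _)));

    let e = MemPoolRejection::NotEnoughFunds(111000, 99500);
    assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500)));
    assert!(!matches!(e, MemPoolRejection::BadAddressVersionByte));
}
```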
-        assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::NotEnoughFunds(100700, 99500)));
 
         let tx_bytes = make_contract_call(
             &contract_sk,
@@ -653,12 +608,8 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
-        assert!(if let MemPoolRejection::NoSuchContract = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::NoSuchContract));
 
         let tx_bytes = make_contract_call(
             &contract_sk,
@@ -681,12 +632,8 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
-        assert!(if let MemPoolRejection::NoSuchPublicFunction = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::NoSuchPublicFunction));
 
         let tx_bytes = make_contract_call(
             &contract_sk,
@@ -709,12 +656,8 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
-        assert!(if let MemPoolRejection::BadFunctionArgument(_) = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_)));
 
         let tx_bytes = make_contract_publish(
             &contract_sk,
@@ -735,12 +678,8 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
-        assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::ContractAlreadyExists(_)));
 
         let microblock_1 = StacksMicroblockHeader {
             version: 0,
@@ -777,13 +716,13 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
+        eprintln!("Err: {e:?}");
         assert!(matches!(e, MemPoolRejection::Other(_)));
 
         let microblock_1 = StacksMicroblockHeader {
             version: 0,
             sequence: 0,
-            prev_block: block_hash.clone(),
+            prev_block: *block_hash,
             tx_merkle_root: Sha512Trunc256Sum::from_data(&[]),
             signature: MessageSignature([0; 65]),
         };
@@ -791,7 +730,7 @@ fn mempool_setup_chainstate() {
         let microblock_2 = StacksMicroblockHeader {
             version: 0,
             sequence: 0,
-            prev_block: block_hash.clone(),
+            prev_block: *block_hash,
             tx_merkle_root: Sha512Trunc256Sum::from_data(&[1, 2, 3]),
             signature: MessageSignature([0; 65]),
         };
@@ -815,7 +754,7 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
+        eprintln!("Err: {e:?}");
         assert!(matches!(e, MemPoolRejection::Other(_)));
 
         let mut microblock_1 = StacksMicroblockHeader {
@@ -856,7 +795,7 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
+        eprintln!("Err: {e:?}");
         assert!(matches!(e, MemPoolRejection::Other(_)));
 
         let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET);
@@ -871,12 +810,8 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
-        assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e {
-            true
-        } else {
-            false
-        });
+        eprintln!("Err: {e:?}");
+        assert!(matches!(e, MemPoolRejection::NoCoinbaseViaMempool));
 
         // find the correct priv-key
         let mut secret_key = None;
@@ -936,12 +871,12 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        eprintln!("Err: {:?}", e);
+        eprintln!("Err: {e:?}");
         assert!(matches!(e, MemPoolRejection::Other(_)));
 
         let contract_id = QualifiedContractIdentifier::new(
-            StandardPrincipalData::from(contract_addr.clone()),
-            ContractName::try_from("implement-trait-contract").unwrap(),
+            StandardPrincipalData::from(contract_addr),
+            ContractName::from("implement-trait-contract"),
         );
         let contract_principal = PrincipalData::Contract(contract_id.clone());
@@ -968,8 +903,8 @@ fn mempool_setup_chainstate() {
             .unwrap();
 
         let contract_id = QualifiedContractIdentifier::new(
-            StandardPrincipalData::from(contract_addr.clone()),
-            ContractName::try_from("bad-trait-contract").unwrap(),
+            StandardPrincipalData::from(contract_addr),
+            ContractName::from("bad-trait-contract"),
         );
         let contract_principal = PrincipalData::Contract(contract_id.clone());
@@ -994,11 +929,7 @@ fn mempool_setup_chainstate() {
                 tx_bytes.len() as u64,
             )
             .unwrap_err();
-        assert!(if let MemPoolRejection::BadFunctionArgument(_) = e {
-            true
-        } else {
-            false
-        });
+        assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_)));
             }
         },
     );
diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs
index 2c555e7232..6f02ecf138 100644
--- a/testnet/stacks-node/src/tests/mod.rs
+++ b/testnet/stacks-node/src/tests/mod.rs
@@ -81,11 +81,11 @@ pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) }
         (ok true)))"#;
 
 // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar
-pub const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01";
-pub const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201";
-pub const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501";
+pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01";
+pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201";
+pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501";
 
-pub const ADDR_4: &'static str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96";
+pub const ADDR_4: &str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96";
 
 lazy_static! {
     pub static ref PUBLISH_CONTRACT: Vec<u8> = make_contract_publish(
@@ -133,6 +133,7 @@ pub fn insert_new_port(port: u16) -> bool {
     ports.insert(port)
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version(
     payload: TransactionPayload,
     sender: &StacksPrivateKey,
@@ -215,6 +216,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version(
     )
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn serialize_sign_tx_anchor_mode_version(
     payload: TransactionPayload,
     sender: &StacksPrivateKey,
@@ -401,10 +403,10 @@ pub fn set_random_binds(config: &mut Config) {
     let rpc_port = gen_random_port();
     let p2p_port = gen_random_port();
     let localhost = "127.0.0.1";
-    config.node.rpc_bind = format!("{}:{}", localhost, rpc_port);
-    config.node.p2p_bind = format!("{}:{}", localhost, p2p_port);
-    config.node.data_url = format!("http://{}:{}", localhost, rpc_port);
-    config.node.p2p_address = format!("{}:{}", localhost, p2p_port);
+    config.node.rpc_bind = format!("{localhost}:{rpc_port}");
+    config.node.p2p_bind = format!("{localhost}:{p2p_port}");
+    config.node.data_url = format!("http://{localhost}:{rpc_port}");
+    config.node.p2p_address = format!("{localhost}:{p2p_port}");
 }
 
 pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress {
@@ -427,9 +429,10 @@ pub fn make_stacks_transfer(
 ) -> Vec<u8> {
     let payload =
         TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
-    serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id)
+    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn make_sponsored_stacks_transfer_on_testnet(
     sender: &StacksPrivateKey,
     payer: &StacksPrivateKey,
@@ -443,7 +446,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet(
     let payload =
         TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
     serialize_sign_sponsored_sig_tx_anchor_mode_version(
-        payload.into(),
+        payload,
         sender,
         payer,
         sender_nonce,
@@ -466,7 +469,7 @@ pub fn make_stacks_transfer_mblock_only(
     let payload =
         TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34]));
     serialize_sign_standard_single_sig_tx_anchor_mode(
-        payload.into(),
+        payload,
         sender,
         nonce,
         tx_fee,
@@ -484,14 +487,15 @@ pub fn make_poison(
     header_2: StacksMicroblockHeader,
 ) -> Vec<u8> {
     let payload = TransactionPayload::PoisonMicroblock(header_1, header_2);
-    serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id)
+    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
 }
 
 pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> {
     let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None);
-    serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id)
+    serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id)
 }
 
+#[allow(clippy::too_many_arguments)]
 pub fn make_contract_call(
     sender: &StacksPrivateKey,
     nonce: u64,
@@ -506,15 +510,16 @@ pub fn make_contract_call(
     let function_name = ClarityName::from(function_name);
 
     let payload = TransactionContractCall {
-        address: contract_addr.clone(),
+        address: *contract_addr,
         contract_name,
         function_name,
-        function_args: function_args.iter().map(|x| x.clone()).collect(),
+        function_args: function_args.to_vec(),
     };
 
     serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id)
 }
+#[allow(clippy::too_many_arguments)]
 pub fn make_contract_call_mblock_only(
     sender: &StacksPrivateKey,
     nonce: u64,
@@ -529,10 +534,10 @@ pub fn make_contract_call_mblock_only(
     let function_name = ClarityName::from(function_name);
 
     let payload = TransactionContractCall {
-        address: contract_addr.clone(),
+        address: *contract_addr,
         contract_name,
         function_name,
-        function_args: function_args.iter().map(|x| x.clone()).collect(),
+        function_args: function_args.to_vec(),
     };
 
     serialize_sign_standard_single_sig_tx_anchor_mode(
@@ -558,7 +563,7 @@ fn make_microblock(
     let mut microblock_builder = StacksMicroblockBuilder::new(
         block.block_hash(),
-        consensus_hash.clone(),
+        consensus_hash,
         chainstate,
         burn_dbconn,
         BlockBuilderSettings::max_value(),
@@ -576,10 +581,9 @@ fn make_microblock(
     // NOTE: we intentionally do not check the block's microblock pubkey hash against the private
     // key, because we may need to test that microblocks get rejected due to bad signatures.
-    let microblock = microblock_builder
+    microblock_builder
         .mine_next_microblock_from_txs(mempool_txs, privk)
-        .unwrap();
-    microblock
+        .unwrap()
 }
 
 /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that
@@ -601,7 +605,7 @@ pub fn select_transactions_where(
         }
     }
 
-    return result;
+    result
 }
 
 /// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController`
@@ -614,20 +618,19 @@ pub fn run_until_burnchain_height(
     target_height: u64,
     conf: &Config,
 ) -> bool {
-    let tip_info = get_chain_info(&conf);
+    let tip_info = get_chain_info(conf);
     let mut current_height = tip_info.burn_block_height;
 
     while current_height < target_height {
         eprintln!(
-            "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})",
-            get_epoch_time_secs(),
-            current_height
+            "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})",
            get_epoch_time_secs()
         );
-        let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed);
+        let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed);
         if !next_result {
             return false;
         }
-        let tip_info = get_chain_info(&conf);
+        let tip_info = get_chain_info(conf);
         current_height = tip_info.burn_block_height;
     }
 
@@ -717,7 +720,6 @@ fn should_succeed_mining_valid_txs() {
                 },
                 _ => {}
             };
-            return
         });
 
     // Use block's hook for asserting expectations
@@ -743,18 +745,18 @@ fn should_succeed_mining_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the smart contract published
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::SmartContract(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::SmartContract(..)
+                    ));
 
                     // 0 event should have been produced
                     let events: Vec<StacksTransactionEvent> = chain_tip
                         .receipts
                         .iter()
                         .flat_map(|a| a.events.clone())
                         .collect();
-                    assert!(events.len() == 0);
+                    assert!(events.is_empty());
                 }
                 2 => {
                     // Inspecting the chain at round 2.
@@ -775,18 +777,18 @@ fn should_succeed_mining_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the get-value contract-call
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::ContractCall(_) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::ContractCall(_)
+                    ));
 
                     // 2 lockup events should have been produced
                     let events: Vec<StacksTransactionEvent> = chain_tip
@@ -807,18 +809,18 @@ fn should_succeed_mining_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the set-value contract-call
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::ContractCall(_) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::ContractCall(_)
+                    ));
 
                     // 2 lockup events + 1 contract event should have been produced
                     let events: Vec<StacksTransactionEvent> = chain_tip
@@ -832,7 +834,7 @@ fn should_succeed_mining_valid_txs() {
                             format!("{}", data.key.0)
                                 == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store"
                                 && data.key.1 == "print"
-                                && format!("{}", data.value) == "\"Setting key foo\"".to_string()
+                                && format!("{}", data.value) == "\"Setting key foo\""
                         }
                         _ => false,
                     });
@@ -848,18 +850,18 @@ fn should_succeed_mining_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the get-value contract-call
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::ContractCall(_) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::ContractCall(_)
+                    ));
 
                     // 1 event should have been produced
                     let events: Vec<StacksTransactionEvent> = chain_tip
@@ -873,7 +875,7 @@ fn should_succeed_mining_valid_txs() {
                             format!("{}", data.key.0)
                                 == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store"
                                 && data.key.1 == "print"
-                                && format!("{}", data.value) == "\"Getting key foo\"".to_string()
+                                && format!("{}", data.value) == "\"Getting key foo\""
                         }
                         _ => false,
                     });
@@ -889,19 +891,19 @@ fn should_succeed_mining_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the STX transfer
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::TokenTransfer(_, _, _) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::TokenTransfer(_, _, _)
+                    ));
 
                     // 1 event should have been produced
                     let events: Vec<StacksTransactionEvent> = chain_tip
@@ -996,7 +998,6 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                 },
                 _ => {}
             };
-            return
         });
 
     // Use block's hook for asserting expectations
@@ -1014,10 +1015,10 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
                 }
                 1 => {
                     // Inspecting the chain at round 1.
@@ -1030,18 +1031,18 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the smart contract published
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::SmartContract(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::SmartContract(..)
+                    ));
                 }
                 2 => {
                     // Inspecting the chain at round 2.
@@ -1054,10 +1055,10 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
                 }
                 3 => {
                     // Inspecting the chain at round 3.
@@ -1070,10 +1071,10 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
                 }
                 4 => {
                     // Inspecting the chain at round 4.
@@ -1086,18 +1087,18 @@ fn should_succeed_handling_malformed_and_valid_txs() {
                     // Transaction #1 should be the coinbase from the leader
                     let coinbase_tx = &chain_tip.block.txs[0];
                     assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match coinbase_tx.payload {
-                        TransactionPayload::Coinbase(..) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        coinbase_tx.payload,
+                        TransactionPayload::Coinbase(..)
+                    ));
 
                     // Transaction #2 should be the contract-call
                     let contract_tx = &chain_tip.block.txs[1];
                     assert!(contract_tx.chain_id == CHAIN_ID_TESTNET);
-                    assert!(match contract_tx.payload {
-                        TransactionPayload::ContractCall(_) => true,
-                        _ => false,
-                    });
+                    assert!(matches!(
+                        contract_tx.payload,
+                        TransactionPayload::ContractCall(_)
+                    ));
                 }
                 _ => {}
             }
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index b5140a06ee..6ae34fce42 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -200,9 +200,7 @@ impl TestSigningChannel {
     /// TODO: update to use signatures vec
     pub fn get_signature() -> Option<Vec<MessageSignature>> {
         let mut signer = TEST_SIGNING.lock().unwrap();
-        let Some(sign_channels) = signer.as_mut() else {
-            return None;
-        };
+        let sign_channels = signer.as_mut()?;
         let recv = sign_channels.recv.take().unwrap();
         drop(signer); // drop signer so we don't hold the lock while receiving.
         let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap();
@@ -362,7 +360,7 @@ pub fn blind_signer_multinode(
         thread::sleep(Duration::from_secs(2));
         info!("Checking for a block proposal to sign...");
         last_count = cur_count;
-        let configs: Vec<&Config> = configs.iter().map(|x| x).collect();
+        let configs: Vec<&Config> = configs.iter().collect();
         match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) {
             Ok(signed_block) => {
                 if signed_blocks.contains(&signed_block) {
@@ -427,10 +425,12 @@ pub fn get_latest_block_proposal(
         .collect();
 
     proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| {
-        if block_a.header.chain_length > block_b.header.chain_length {
-            return std::cmp::Ordering::Greater;
-        } else if block_a.header.chain_length < block_b.header.chain_length {
-            return std::cmp::Ordering::Less;
+        let res = block_a
+            .header
+            .chain_length
+            .cmp(&block_b.header.chain_length);
+        if res != std::cmp::Ordering::Equal {
+            return res;
         }
         // the heights are tied, tie break with the latest miner
         if *is_latest_a {
@@ -439,7 +439,7 @@ pub fn get_latest_block_proposal(
         if *is_latest_b {
             return std::cmp::Ordering::Less;
         }
-        return std::cmp::Ordering::Equal;
+        std::cmp::Ordering::Equal
     });
 
     for (b, _, is_latest) in proposed_blocks.iter() {
@@ -542,7 +542,7 @@ pub fn read_and_sign_block_proposal(
     channel
         .send(proposed_block.header.signer_signature)
         .unwrap();
-    return Ok(signer_sig_hash);
+    Ok(signer_sig_hash)
 }
 
 /// Return a working nakamoto-neon config and the miner's bitcoin address to fund
@@ -585,12 +585,12 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress
         burnchain.peer_host = Some("127.0.0.1".to_string());
     }
 
-    conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref());
+    conf.burnchain.magic_bytes = MagicBytes::from([b'T', b'3'].as_ref());
     conf.burnchain.poll_time_secs = 1;
     conf.node.pox_sync_sample_secs = 0;
 
-    conf.miner.first_attempt_time_ms = i64::max_value() as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64;
+    conf.miner.first_attempt_time_ms = i64::MAX as u64;
+    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
 
     // if there's just one node, then this must be true for tests to pass
     conf.miner.wait_for_block_download = false;
@@ -709,7 +709,7 @@ pub fn next_block_and_wait_for_commits(
     coord_channels: &[&Arc<Mutex<CoordinatorChannels>>],
     commits_submitted: &[&Arc<AtomicU64>],
 ) -> Result<(), String> {
-    let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect();
+    let commits_submitted: Vec<_> = commits_submitted.to_vec();
     let blocks_processed_before: Vec<_> = coord_channels
         .iter()
         .map(|x| {
@@ -786,7 +786,7 @@ pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey {
     let stacker_sk = Secp256k1PrivateKey::new();
     let stacker_address = tests::to_addr(&stacker_sk);
     naka_conf.add_initial_balance(
-        PrincipalData::from(stacker_address.clone()).to_string(),
+        PrincipalData::from(stacker_address).to_string(),
         POX_4_DEFAULT_STACKER_BALANCE,
     );
     stacker_sk
@@ -813,17 +813,17 @@ pub fn boot_to_epoch_3(
         "Epoch 3.0 Boundary" => (epoch_3.start_height - 1),
     );
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
     // first mined stacks block
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
 
     let start_time = Instant::now();
     loop {
         if start_time.elapsed() > Duration::from_secs(20) {
             panic!("Timed out waiting for the stacks height to increment")
         }
-        let stacks_height = get_chain_info(&naka_conf).stacks_tip_height;
+        let stacks_height = get_chain_info(naka_conf).stacks_tip_height;
         if stacks_height >= 1 {
             break;
         }
@@ -840,13 +840,13 @@ pub fn boot_to_epoch_3(
     for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) {
         let pox_addr = PoxAddress::from_legacy(
             AddressHashMode::SerializeP2PKH,
-            tests::to_addr(&stacker_sk).bytes,
+            tests::to_addr(stacker_sk).bytes,
         );
         let pox_addr_tuple: clarity::vm::Value =
             pox_addr.clone().as_clarity_tuple().unwrap().into();
         let signature = make_pox_4_signer_key_signature(
             &pox_addr,
-            &signer_sk,
+            signer_sk,
             reward_cycle.into(),
             &Pox4SignatureTopic::StackStx,
             naka_conf.burnchain.chain_id,
@@ -860,7 +860,7 @@ pub fn boot_to_epoch_3(
         let signer_pk = StacksPublicKey::from_private(signer_sk);
 
         let stacking_tx = tests::make_contract_call(
-            &stacker_sk,
+            stacker_sk,
             0,
             1000,
             naka_conf.burnchain.chain_id,
@@ -900,9 +900,9 @@ pub fn boot_to_epoch_3(
     // Run until the prepare phase
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         reward_set_calculation,
-        &naka_conf,
+        naka_conf,
     );
 
     // We need to vote on the aggregate public key if this test is self signing
@@ -943,9 +943,9 @@ pub fn boot_to_epoch_3(
 
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         epoch_3.start_height - 1,
-        &naka_conf,
+        naka_conf,
     );
 
     info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop");
@@ -975,17 +975,17 @@ pub fn boot_to_pre_epoch_3_boundary(
         "Epoch 3.0 Boundary" => (epoch_3.start_height - 1),
     );
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
     // first mined stacks block
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
 
     let start_time = Instant::now();
     loop {
         if start_time.elapsed() > Duration::from_secs(20) {
             panic!("Timed out waiting for the stacks height to increment")
         }
-        let stacks_height = get_chain_info(&naka_conf).stacks_tip_height;
+        let stacks_height = get_chain_info(naka_conf).stacks_tip_height;
         if stacks_height >= 1 {
             break;
         }
@@ -1002,13 +1002,13 @@ pub fn boot_to_pre_epoch_3_boundary(
     for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) {
         let pox_addr = PoxAddress::from_legacy(
             AddressHashMode::SerializeP2PKH,
-            tests::to_addr(&stacker_sk).bytes,
+            tests::to_addr(stacker_sk).bytes,
         );
         let pox_addr_tuple: clarity::vm::Value =
             pox_addr.clone().as_clarity_tuple().unwrap().into();
         let signature = make_pox_4_signer_key_signature(
             &pox_addr,
-            &signer_sk,
+            signer_sk,
             reward_cycle.into(),
             &Pox4SignatureTopic::StackStx,
             naka_conf.burnchain.chain_id,
@@ -1022,7 +1022,7 @@ pub fn boot_to_pre_epoch_3_boundary(
         let signer_pk = StacksPublicKey::from_private(signer_sk);
 
         let stacking_tx = tests::make_contract_call(
-            &stacker_sk,
+            stacker_sk,
             0,
             1000,
             naka_conf.burnchain.chain_id,
@@ -1062,9 +1062,9 @@ pub fn boot_to_pre_epoch_3_boundary(
     // Run until the prepare phase
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         reward_set_calculation,
-        &naka_conf,
+        naka_conf,
     );
 
     // We need to vote on the aggregate public key if this test is self signing
@@ -1105,9 +1105,9 @@ pub fn boot_to_pre_epoch_3_boundary(
 
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         epoch_3.start_height - 2,
-        &naka_conf,
+        naka_conf,
     );
 
     info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block");
@@ -1191,7 +1191,7 @@ pub fn is_key_set_for_cycle(
     is_mainnet: bool,
     http_origin: &str,
 ) -> Result<bool, String> {
-    let key = get_key_for_cycle(reward_cycle, is_mainnet, &http_origin)?;
+    let key = get_key_for_cycle(reward_cycle, is_mainnet, http_origin)?;
     Ok(key.is_some())
 }
 
@@ -1218,10 +1218,10 @@ pub fn setup_epoch_3_reward_set(
     let epoch_3_reward_cycle_boundary =
         epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len);
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
     // first mined stacks block
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
 
     // stack enough to activate pox-4
     let block_height = btc_regtest_controller.get_headers_height();
@@ -1241,13 +1241,13 @@ pub fn setup_epoch_3_reward_set(
     for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) {
         let pox_addr = PoxAddress::from_legacy(
             AddressHashMode::SerializeP2PKH,
-            tests::to_addr(&stacker_sk).bytes,
+            tests::to_addr(stacker_sk).bytes,
         );
         let pox_addr_tuple: clarity::vm::Value =
             pox_addr.clone().as_clarity_tuple().unwrap().into();
         let signature = make_pox_4_signer_key_signature(
             &pox_addr,
-            &signer_sk,
+            signer_sk,
             reward_cycle.into(),
             &Pox4SignatureTopic::StackStx,
             naka_conf.burnchain.chain_id,
@@ -1260,7 +1260,7 @@ pub fn setup_epoch_3_reward_set(
         let signer_pk = StacksPublicKey::from_private(signer_sk);
         let stacking_tx = tests::make_contract_call(
-            &stacker_sk,
+            stacker_sk,
             0,
             1000,
             naka_conf.burnchain.chain_id,
@@ -1322,9 +1322,9 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary(
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         epoch_3_reward_set_calculation_boundary,
-        &naka_conf,
+        naka_conf,
     );
 
     info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}.");
@@ -1364,9 +1364,9 @@ pub fn boot_to_epoch_25(
     );
     run_until_burnchain_height(
         btc_regtest_controller,
-        &blocks_processed,
+        blocks_processed,
         epoch_25_start_height,
-        &naka_conf,
+        naka_conf,
     );
     info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}.");
 }
@@ -1391,7 +1391,7 @@ pub fn boot_to_epoch_3_reward_set(
         btc_regtest_controller,
         num_stacking_cycles,
     );
-    next_block_and_wait(btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(btc_regtest_controller, blocks_processed);
     info!(
         "Bootstrapped to Epoch 3.0 reward set calculation height: {}",
         get_chain_info(naka_conf).burn_block_height
     );
@@ -1426,7 +1426,7 @@ fn simple_neon_integration() {
     }
 
     let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
-    let prom_bind = format!("{}:{}", "127.0.0.1", 6000);
+    let prom_bind = "127.0.0.1:6000".to_string();
     naka_conf.node.prometheus_bind = Some(prom_bind.clone());
     naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5);
     let sender_sk = Secp256k1PrivateKey::new();
     // setup sender + recipient for a test stx transfer
     let sender_addr = tests::to_addr(&sender_sk);
     let send_amt = 1000;
     let send_fee = 100;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         send_amt * 2 + send_fee,
     );
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -1502,7 +1499,7 @@ fn simple_neon_integration() {
     #[cfg(feature = "monitoring_prom")]
     {
         wait_for(10, || {
-            let prom_http_origin = format!("http://{}", prom_bind);
+            let prom_http_origin = format!("http://{prom_bind}");
             let client = reqwest::blocking::Client::new();
             let res = client
                 .get(&prom_http_origin)
@@ -1569,8 +1566,7 @@ fn simple_neon_integration() {
                 .as_array()
                 .unwrap()
                 .iter()
-                .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
-                .is_some()
+                .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
         });
         Ok(transfer_tx_included)
     })
@@ -1598,17 +1594,13 @@ fn simple_neon_integration() {
     );
 
     // assert that the transfer tx was observed
-    let transfer_tx_included = test_observer::get_blocks()
-        .into_iter()
-        .find(|block_json| {
-            block_json["transactions"]
-                .as_array()
-                .unwrap()
-                .iter()
-                .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
-                .is_some()
-        })
-        .is_some();
+    let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| {
+        block_json["transactions"]
+            .as_array()
+            .unwrap()
+            .iter()
+            .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
+    });
 
     assert!(
         transfer_tx_included,
@@ -1626,7 +1618,7 @@ fn simple_neon_integration() {
     #[cfg(feature = "monitoring_prom")]
     {
         wait_for(10, || {
-            let prom_http_origin = format!("http://{}", prom_bind);
+            let prom_http_origin = format!("http://{prom_bind}");
             let client = reqwest::blocking::Client::new();
             let res = client
                 .get(&prom_http_origin)
@@ -1639,10 +1631,8 @@ fn simple_neon_integration() {
                 tip.stacks_block_height
             );
-            let expected_result_2 = format!(
-                "stacks_node_stacks_tip_height {}",
-                tip.stacks_block_height - 1
-            );
+            let expected_result_2 =
+                format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height);
             Ok(res.contains(&expected_result_1) && res.contains(&expected_result_2))
         })
         .expect("Prometheus metrics did not update");
@@ -1676,7 +1666,7 @@ fn flash_blocks_on_epoch_3() {
     }
 
     let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
-    let prom_bind = format!("{}:{}", "127.0.0.1", 6000);
+    let prom_bind = "127.0.0.1:6000".to_string();
     naka_conf.node.prometheus_bind = Some(prom_bind.clone());
     naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
     let sender_sk = Secp256k1PrivateKey::new();
     // setup sender + recipient for a test stx transfer
     let sender_addr = tests::to_addr(&sender_sk);
     let send_amt = 1000;
     let send_fee = 100;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         send_amt * 2 + send_fee,
     );
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -1852,17 +1839,13 @@ fn flash_blocks_on_epoch_3() {
     );
 
     // assert that the transfer tx was observed
-    let transfer_tx_included = test_observer::get_blocks()
-        .into_iter()
-        .find(|block_json| {
-            block_json["transactions"]
-                .as_array()
-                .unwrap()
-                .iter()
-                .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
-                .is_some()
-        })
-        .is_some();
+    let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| {
+        block_json["transactions"]
+            .as_array()
+            .unwrap()
+            .iter()
+            .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex))
+    });
 
     assert!(
         transfer_tx_included,
@@ -1898,18 +1881,13 @@ fn flash_blocks_on_epoch_3() {
     // Verify that there's a gap of AT LEAST 3 blocks
     assert!(
         gap_end - gap_start + 1 >= 3,
-        "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {} to {}",
-        gap_start,
-        gap_end
+        "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {gap_start} to {gap_end}"
     );
 
     // Verify that the gap includes the Epoch 3.0 activation height
     assert!(
         gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end,
-        "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})",
-        gap_start,
-        gap_end,
-        epoch_3_start_height
+        "Expected the gap ({gap_start}..={gap_end}) to include the Epoch 3.0 activation height ({epoch_3_start_height})"
     );
 
     // Verify blocks before and after the gap
@@ -1918,7 +1896,7 @@ fn flash_blocks_on_epoch_3() {
     check_nakamoto_empty_block_heuristics();
 
     info!("Verified burn block ranges, including expected gap for flash blocks");
-    info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height);
+    info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {epoch_3_start_height}");
 
     coord_channel
         .lock()
@@ -1957,13 +1935,10 @@ fn mine_multiple_per_tenure_integration() {
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -1993,7 +1968,7 @@ fn mine_multiple_per_tenure_integration() {
         .spawn(move || run_loop.start(None, 0))
         .unwrap();
     wait_for_runloop(&blocks_processed);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     boot_to_epoch_3(
         &naka_conf,
         &blocks_processed,
@@ -2028,7 +2003,7 @@ fn mine_multiple_per_tenure_integration() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        debug!("Mining tenure {}", tenure_ix);
+        debug!("Mining tenure {tenure_ix}");
         let commits_before = commits_submitted.load(Ordering::SeqCst);
         next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
             .unwrap();
@@ -2145,22 +2120,19 @@ fn multiple_miners() {
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     let mut conf_node_2 = naka_conf.clone();
     let localhost = "127.0.0.1";
-    conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc);
-    conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p);
-    conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc);
-    conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p);
+    conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}");
     conf_node_2.node.seed = vec![2, 2, 2, 2];
     conf_node_2.burnchain.local_mining_public_key = Some(
         Keychain::default(conf_node_2.node.seed.clone())
@@ -2175,7 +2147,7 @@ fn multiple_miners() {
     let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed);
     let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
 
-    conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1");
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
 
     conf_node_2.node.set_bootstrap_nodes(
         format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind),
@@ -2243,7 +2215,7 @@ fn multiple_miners() {
         .unwrap();
     wait_for_runloop(&blocks_processed);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     boot_to_epoch_3(
         &naka_conf,
         &blocks_processed,
@@ -2286,7 +2258,7 @@ fn multiple_miners() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        info!("Mining tenure {}", tenure_ix);
+        info!("Mining tenure {tenure_ix}");
         let commits_before = commits_submitted.load(Ordering::SeqCst);
         next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
             .unwrap();
@@ -2386,9 +2358,9 @@ fn correct_burn_outs() {
     {
         let epochs = naka_conf.burnchain.epochs.as_mut().unwrap();
-        let epoch_24_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch24).unwrap();
-        let epoch_25_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap();
-        let epoch_30_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap();
+        let epoch_24_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch24).unwrap();
+        let epoch_25_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch25).unwrap();
+        let epoch_30_ix = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch30).unwrap();
         epochs[epoch_24_ix].end_height = 208;
         epochs[epoch_25_ix].start_height = 208;
         epochs[epoch_25_ix].end_height = 225;
@@ -2411,10 +2383,7 @@ fn correct_burn_outs() {
     let stacker_accounts = accounts[0..3].to_vec();
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
 
     let signers = TestSigners::new(vec![sender_signer_sk]);
 
@@ -2503,7 +2472,7 @@ fn correct_burn_outs() {
             let pox_addr = PoxAddress::from_legacy(
                 AddressHashMode::SerializeP2PKH,
-                tests::to_addr(&account.0).bytes,
+                tests::to_addr(account.0).bytes,
             );
             let pox_addr_tuple: clarity::vm::Value =
                 pox_addr.clone().as_clarity_tuple().unwrap().into();
@@ -2524,7 +2493,7 @@ fn correct_burn_outs() {
             .to_rsv();
 
             let stacking_tx = tests::make_contract_call(
-                &account.0,
+                account.0,
                 account.2.nonce,
                 1000,
                 naka_conf.burnchain.chain_id,
@@ -2586,7 +2555,7 @@ fn correct_burn_outs() {
         .block_height_to_reward_cycle(epoch_3.start_height)
         .unwrap();
-    info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle);
+    info!("first_epoch_3_cycle: {first_epoch_3_cycle:?}");
 
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
     let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap();
@@ -2732,10 +2701,7 @@ fn block_proposal_api_endpoint() {
     let stacker_sk = setup_stacker(&mut conf);
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
 
     // only subscribe to the block proposal events
     test_observer::spawn();
@@ -2760,7 +2726,7 @@ fn block_proposal_api_endpoint() {
     let coord_channel = run_loop.coordinator_channels();
 
     let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     wait_for_runloop(&blocks_processed);
     boot_to_epoch_3(
         &conf,
@@ -2814,7 +2780,7 @@ fn block_proposal_api_endpoint() {
         .unwrap()
         .unwrap();
 
-    let privk = conf.miner.mining_key.unwrap().clone();
+    let privk = conf.miner.mining_key.unwrap();
     let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())
         .expect("Failed to get sortition tip");
     let db_handle = sortdb.index_handle(&sort_tip);
@@ -2910,41 +2876,41 @@ fn block_proposal_api_endpoint() {
         ("Must wait", sign(&proposal), HTTP_TOO_MANY, None),
         (
             "Non-canonical or absent tenure",
-            (|| {
+            {
                 let mut sp = sign(&proposal);
                 sp.block.header.consensus_hash.0[3] ^= 0x07;
                 sp
-            })(),
+            },
             HTTP_ACCEPTED,
             Some(Err(ValidateRejectCode::NonCanonicalTenure)),
         ),
         (
             "Corrupted (bit flipped after signing)",
-            (|| {
+            {
                 let mut sp = sign(&proposal);
                 sp.block.header.timestamp ^= 0x07;
                 sp
-            })(),
+            },
             HTTP_ACCEPTED,
             Some(Err(ValidateRejectCode::ChainstateError)),
        ),
         (
             "Invalid `chain_id`",
-            (|| {
+            {
                 let mut p = proposal.clone();
                 p.chain_id ^= 0xFFFFFFFF;
                 sign(&p)
-            })(),
+            },
             HTTP_ACCEPTED,
             Some(Err(ValidateRejectCode::InvalidBlock)),
         ),
         (
             "Invalid `miner_signature`",
-            (|| {
+            {
                 let mut sp = sign(&proposal);
                 sp.block.header.miner_signature.0[1] ^= 0x80;
                 sp
-            })(),
+            },
             HTTP_ACCEPTED,
             Some(Err(ValidateRejectCode::ChainstateError)),
         ),
@@ -3042,10 +3008,7 @@ fn block_proposal_api_endpoint() {
         .iter()
         .zip(proposal_responses.iter())
     {
-        info!(
-            "Received response {:?}, expecting {:?}",
-            &response, &expected_response
-        );
+        info!("Received response {response:?}, expecting {expected_response:?}");
         match expected_response {
             Ok(_) => {
                 assert!(matches!(response, BlockValidateResponse::Ok(_)));
@@ -3093,19 +3056,16 @@ fn miner_writes_proposed_block_to_stackerdb() {
     let send_amt = 1000;
     let send_fee = 100;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         send_amt + send_fee,
     );
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
 
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
 
     test_observer::spawn();
     test_observer::register(
@@ -3216,9 +3176,9 @@ fn vote_for_aggregate_key_burn_op() {
     let signer_sk = Secp256k1PrivateKey::new();
     let signer_addr = tests::to_addr(&signer_sk);
 
-    let mut signers = TestSigners::new(vec![signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![signer_sk]);
 
-    naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000);
+    naka_conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000);
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     test_observer::spawn();
@@ -3277,7 +3237,7 @@ fn vote_for_aggregate_key_burn_op() {
     let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer();
     info!("Submitting pre-stx op");
     let pre_stx_op = PreStxOp {
-        output: signer_addr.clone(),
+        output: signer_addr,
         // to be filled in
         txid: Txid([0u8; 32]),
         vtxindex: 0,
@@ -3343,13 +3303,13 @@ fn vote_for_aggregate_key_burn_op() {
     let stacker_pk = StacksPublicKey::from_private(&stacker_sk);
     let signer_key: StacksPublicKeyBuffer = stacker_pk.to_bytes_compressed().as_slice().into();
-    let aggregate_key = signer_key.clone();
+    let aggregate_key = signer_key;
 
     let vote_for_aggregate_key_op =
         BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp {
             signer_key,
             signer_index,
-            sender: signer_addr.clone(),
+            sender: signer_addr,
             round: 0,
             reward_cycle,
             aggregate_key,
@@ -3360,7 +3320,7 @@ fn vote_for_aggregate_key_burn_op() {
             burn_header_hash: BurnchainHeaderHash::zero(),
         });
 
-    let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk.clone(), false);
+    let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk, false);
     assert!(
         btc_regtest_controller
             .submit_operation(
@@ -3393,10 +3353,10 @@ fn vote_for_aggregate_key_burn_op() {
     for tx in transactions.iter() {
         let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
         if raw_tx == "0x00" {
-            info!("Found a burn op: {:?}", tx);
+            info!("Found a burn op: {tx:?}");
             let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap();
             if !burnchain_op.contains_key("vote_for_aggregate_key") {
-                warn!("Got unexpected burnchain op: {:?}", burnchain_op);
+                warn!("Got unexpected burnchain op: {burnchain_op:?}");
                 panic!("unexpected btc transaction type");
             }
             let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap();
@@ -3446,7 +3406,7 @@ fn follower_bootup() {
     let sender_sk = Secp256k1PrivateKey::new();
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     let tenure_count = 5;
     let inter_blocks_per_tenure = 9;
     // setup sender + recipient for some test stx transfers
@@ -3455,13 +3415,10 @@ fn follower_bootup() {
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -3574,7 +3531,7 @@ fn follower_bootup() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        debug!("follower_bootup: Miner runs tenure {}", tenure_ix);
+        debug!("follower_bootup: Miner runs tenure {tenure_ix}");
         let commits_before = commits_submitted.load(Ordering::SeqCst);
         next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
             .unwrap();
@@ -3582,10 +3539,7 @@ fn follower_bootup() {
         let mut last_tip = BlockHeaderHash([0x00; 32]);
         let mut last_nonce = None;
 
-        debug!(
-            "follower_bootup: Miner mines interum blocks for tenure {}",
-            tenure_ix
-        );
+        debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}");
 
         // mine the interim blocks
         for _ in 0..inter_blocks_per_tenure {
@@ -3621,8 +3575,8 @@ fn follower_bootup() {
             let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
 
-            debug!("follower_bootup: Miner account: {:?}", &account);
-            debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx);
+            debug!("follower_bootup: Miner account: {account:?}");
+            debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid());
 
             let now = get_epoch_time_secs();
             while get_epoch_time_secs() < now + 10 {
@@ -3767,13 +3721,13 @@ fn follower_bootup_across_multiple_cycles() {
     let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
     naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
-    naka_conf.node.pox_sync_sample_secs = 30;
+    naka_conf.node.pox_sync_sample_secs = 180;
     naka_conf.burnchain.max_rbf = 10_000_000;
 
     let sender_sk = Secp256k1PrivateKey::new();
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     let tenure_count = 5;
     let inter_blocks_per_tenure = 9;
     // setup sender + recipient for some test stx transfers
@@ -3782,13 +3736,10 @@ fn follower_bootup_across_multiple_cycles() {
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     test_observer::spawn();
@@ -3972,7 +3923,7 @@ fn follower_bootup_custom_chain_id() {
     let sender_sk = Secp256k1PrivateKey::new();
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     let tenure_count = 5;
     let inter_blocks_per_tenure = 9;
     // setup sender + recipient for some test stx transfers
@@ -3981,13 +3932,10 @@ fn follower_bootup_custom_chain_id() {
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -4100,7 +4048,7 @@ fn follower_bootup_custom_chain_id() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        debug!("follower_bootup: Miner runs tenure {}", tenure_ix);
+        debug!("follower_bootup: Miner runs tenure {tenure_ix}");
         let commits_before = commits_submitted.load(Ordering::SeqCst);
         next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
             .unwrap();
@@ -4108,10 +4056,7 @@ fn follower_bootup_custom_chain_id() {
         let mut last_tip = BlockHeaderHash([0x00; 32]);
         let mut last_nonce = None;
 
-        debug!(
-            "follower_bootup: Miner mines interum blocks for tenure {}",
-            tenure_ix
-        );
+        debug!("follower_bootup: Miner mines interum blocks for tenure {tenure_ix}");
 
         // mine the interim blocks
        for _ in 0..inter_blocks_per_tenure {
@@ -4147,8 +4092,8 @@ fn follower_bootup_custom_chain_id() {
             let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap();
 
-            debug!("follower_bootup: Miner account: {:?}", &account);
-            debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx);
+            debug!("follower_bootup: Miner account: {account:?}");
+            debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid());
 
             let now = get_epoch_time_secs();
             while get_epoch_time_secs() < now + 10 {
@@ -4326,23 +4271,14 @@ fn burn_ops_integration_test() {
     let sender_addr = tests::to_addr(&sender_sk);
     let mut sender_nonce = 0;
 
-    let mut signers = TestSigners::new(vec![signer_sk_1.clone()]);
+    let mut signers = TestSigners::new(vec![signer_sk_1]);
 
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     // Add the initial balances to the other accounts
-    naka_conf.add_initial_balance(
-        PrincipalData::from(stacker_addr_1.clone()).to_string(),
-        1000000,
-    );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(stacker_addr_2.clone()).to_string(),
-        1000000,
-    );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
-        100_000_000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_1).to_string(), 1000000);
+    naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_2).to_string(), 1000000);
+    naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 100_000_000);
 
     test_observer::spawn();
     test_observer::register_any(&mut naka_conf);
@@ -4395,7 +4331,7 @@ fn burn_ops_integration_test() {
     info!("Submitting first pre-stx op");
     let pre_stx_op = PreStxOp {
-        output: signer_addr_1.clone(),
+        output: signer_addr_1,
         // to be filled in
         txid: Txid([0u8; 32]),
         vtxindex: 0,
@@ -4426,7 +4362,7 @@ fn burn_ops_integration_test() {
     let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer();
     info!("Submitting second pre-stx op");
     let pre_stx_op_2 = PreStxOp {
-        output: signer_addr_2.clone(),
+        output: signer_addr_2,
         // to be filled in
         txid: Txid([0u8; 32]),
         vtxindex: 0,
@@ -4448,7 +4384,7 @@ fn burn_ops_integration_test() {
     let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer();
     info!("Submitting third pre-stx op");
     let pre_stx_op_3 = PreStxOp {
-        output: stacker_addr_1.clone(),
+        output: stacker_addr_1,
         txid: Txid([0u8; 32]),
         vtxindex: 0,
         block_height: 0,
@@ -4469,7 +4405,7 @@ fn burn_ops_integration_test() {
     info!("Submitting fourth pre-stx op");
     let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer();
     let pre_stx_op_4 = PreStxOp {
-        output: stacker_addr_2.clone(),
+        output: stacker_addr_2,
         txid: Txid([0u8; 32]),
         vtxindex: 0,
         block_height: 0,
@@ -4566,10 +4502,10 @@ fn burn_ops_integration_test() {
         "reward_cycle" => reward_cycle,
     );
 
-    let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false);
-    let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false);
-    let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false);
-    let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false);
+    let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1, false);
+    let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2, false);
+    let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1, false);
+    let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2, false);
 
     info!(
         "Before stack-stx op, signer 1 total: {}",
@@ -4603,8 +4539,8 @@ fn burn_ops_integration_test() {
     info!("Submitting transfer STX op");
     let transfer_stx_op = TransferStxOp {
-        sender: stacker_addr_1.clone(),
-        recipient: stacker_addr_2.clone(),
+        sender: stacker_addr_1,
+        recipient: stacker_addr_2,
         transfered_ustx: 10000,
         memo: vec![],
         txid: Txid([0u8; 32]),
@@ -4626,8 +4562,8 @@ fn burn_ops_integration_test() {
     info!("Submitting delegate STX op");
     let del_stx_op = DelegateStxOp {
-        sender: stacker_addr_2.clone(),
-        delegate_to: stacker_addr_1.clone(),
+        sender: stacker_addr_2,
+        delegate_to: stacker_addr_1,
         reward_addr: None,
         delegated_ustx: 100_000,
         // to be filled in
@@ -4654,7 +4590,7 @@ fn burn_ops_integration_test() {
     let min_stx = pox_info.next_cycle.min_threshold_ustx;
 
     let stack_stx_op_with_some_signer_key = StackStxOp {
-        sender: signer_addr_1.clone(),
+        sender: signer_addr_1,
         reward_addr: pox_addr,
         stacked_ustx: min_stx.into(),
         num_cycles: lock_period,
@@ -4681,7 +4617,7 @@ fn burn_ops_integration_test() {
     );
 
     let stack_stx_op_with_no_signer_key = StackStxOp {
-        sender: signer_addr_2.clone(),
+        sender: signer_addr_2,
         reward_addr: PoxAddress::Standard(signer_addr_2, None),
         stacked_ustx: 100000,
         num_cycles: 6,
@@ -4766,7 +4702,7 @@ fn burn_ops_integration_test() {
     for tx in transactions.iter().rev() {
         let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap();
         if raw_tx == "0x00" {
-            info!("Found a burn op: {:?}", tx);
+            info!("Found a burn op: {tx:?}");
             assert!(block_has_tenure_change, "Block should have a tenure change");
             let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap();
             if burnchain_op.contains_key("transfer_stx") {
@@ -4784,15 +4720,14 @@ fn burn_ops_integration_test() {
                 assert_eq!(recipient, stacker_addr_2.to_string());
                 assert_eq!(transfered_ustx, 10000);
                 info!(
-                    "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}",
-                    sender, recipient, transfered_ustx
+                    "Transfer STX op: sender: {sender}, recipient: {recipient}, transfered_ustx: {transfered_ustx}"
                 );
                 assert!(!transfer_stx_found, "Transfer STX op should be unique");
                 transfer_stx_found = true;
                 continue;
             }
             if burnchain_op.contains_key("delegate_stx") {
-                info!("Got delegate STX op: {:?}", burnchain_op);
+                info!("Got delegate STX op: {burnchain_op:?}");
                 let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap();
                 let sender_obj = delegate_stx_obj.get("sender").unwrap();
                 let sender = sender_obj.get("address").unwrap().as_str().unwrap();
@@ -4811,7 +4746,7 @@ fn burn_ops_integration_test() {
                 continue;
             }
             if !burnchain_op.contains_key("stack_stx") {
-                warn!("Got unexpected burnchain op: {:?}", burnchain_op);
+                warn!("Got unexpected burnchain op: {burnchain_op:?}");
                 panic!("unexpected btc transaction type");
             }
             let stack_stx_obj = burnchain_op.get("stack_stx").unwrap();
@@ -4882,7 +4817,7 @@ fn burn_ops_integration_test() {
     for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() {
         let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap();
         for stacking_op in stacking_ops.into_iter() {
-            debug!("Stacking op queried from sortdb: {:?}", stacking_op);
+            debug!("Stacking op queried from sortdb: {stacking_op:?}");
             match stacking_op.signer_key {
                 Some(_) => found_some = true,
                 None => found_none = true,
@@ -4931,23 +4866,21 @@ fn forked_tenure_is_ignored() {
     let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
     naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10);
+    naka_conf.miner.block_commit_delay = Duration::from_secs(0);
     let sender_sk = Secp256k1PrivateKey::new();
     // setup sender + recipient for a test stx transfer
     let sender_addr = tests::to_addr(&sender_sk);
     let send_amt = 100;
     let send_fee = 180;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         send_amt + send_fee,
     );
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]);
+    let mut signers = TestSigners::new(vec![sender_signer_sk]);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let stacker_sk = setup_stacker(&mut naka_conf);
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
@@ -5051,7 +4984,7 @@ fn forked_tenure_is_ignored() {
     // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to
     // be processed
-    test_skip_commit_op.0.lock().unwrap().replace(true);
+    test_skip_commit_op.set(true);
     TEST_BROADCAST_STALL.lock().unwrap().replace(false);
 
     // Wait for a stacks block to be broadcasted.
@@ -5075,7 +5008,7 @@ fn forked_tenure_is_ignored() {
         .nakamoto_blocks_db()
         .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash)
         .unwrap()
-        .get(0)
+        .first()
         .cloned()
         .unwrap();
 
@@ -5104,7 +5037,7 @@ fn forked_tenure_is_ignored() {
         .expect("Mutex poisoned")
         .get_stacks_blocks_processed();
     next_block_and(&mut btc_regtest_controller, 60, || {
-        test_skip_commit_op.0.lock().unwrap().replace(false);
+        test_skip_commit_op.set(false);
         TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false);
         let commits_count = commits_submitted.load(Ordering::SeqCst);
         let blocks_count = mined_blocks.load(Ordering::SeqCst);
@@ -5304,13 +5237,10 @@ fn check_block_heights() {
     let send_fee = 180;
     let deploy_fee = 3000;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let stacker_sk = setup_stacker(&mut naka_conf);
@@ -5394,12 +5324,12 @@ fn check_block_heights() {
         vec![],
     );
     let preheights = heights0_value.expect_tuple().unwrap();
-    info!("Heights from pre-epoch 3.0: {}", preheights);
+    info!("Heights from pre-epoch 3.0: {preheights}");
 
     wait_for_first_naka_block_commit(60, &commits_submitted);
 
     let info = get_chain_info_result(&naka_conf).unwrap();
-    info!("Chain info: {:?}", info);
+    info!("Chain info: {info:?}");
 
     // With the first Nakamoto block, the chain tip and the number of tenures
     // must be the same (before Nakamoto every block counts as a tenure)
@@ -5417,7 +5347,7 @@ fn check_block_heights() {
         vec![],
     );
     let heights0 = heights0_value.expect_tuple().unwrap();
-    info!("Heights from epoch 3.0 start: {}", heights0);
+    info!("Heights from epoch 3.0 start: {heights0}");
     assert_eq!(
         heights0.get("burn-block-height"),
         preheights.get("burn-block-height"),
@@ -5466,7 +5396,7 @@ fn check_block_heights() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        info!("Mining tenure {}", tenure_ix);
+        info!("Mining tenure {tenure_ix}");
         let commits_before = commits_submitted.load(Ordering::SeqCst);
         next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
             .unwrap();
@@ -5488,7 +5418,7 @@ fn check_block_heights() {
             vec![],
         );
         let heights1 = heights1_value.expect_tuple().unwrap();
-        info!("Heights from Clarity 1: {}", heights1);
+        info!("Heights from Clarity 1: {heights1}");
 
         let heights3_value = call_read_only(
             &naka_conf,
@@ -5498,7 +5428,7 @@ fn check_block_heights() {
             vec![],
        );
         let heights3 = heights3_value.expect_tuple().unwrap();
-        info!("Heights from Clarity 3: {}", heights3);
+        info!("Heights from Clarity 3: {heights3}");
 
         let bbh1 = heights1
             .get("burn-block-height")
@@ -5598,7 +5528,7 @@ fn check_block_heights() {
             vec![],
        );
         let heights1 = heights1_value.expect_tuple().unwrap();
-        info!("Heights from Clarity 1: {}", heights1);
+        info!("Heights from Clarity 1: {heights1}");
 
         let heights3_value = call_read_only(
             &naka_conf,
@@ -5608,7 +5538,7 @@ fn check_block_heights() {
             vec![],
         );
         let heights3 = heights3_value.expect_tuple().unwrap();
-        info!("Heights from Clarity 3: {}", heights3);
+        info!("Heights from Clarity 3: {heights3}");
 
         let bbh1 = heights1
             .get("burn-block-height")
@@ -5723,17 +5653,11 @@ fn nakamoto_attempt_time() {
     let sender_sk = Secp256k1PrivateKey::new();
     let sender_addr = tests::to_addr(&sender_sk);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
-        1_000_000_000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1_000_000_000);
 
     let sender_signer_sk = Secp256k1PrivateKey::new();
     let sender_signer_addr = tests::to_addr(&sender_signer_sk);
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100_000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100_000);
 
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
@@ -5849,7 +5773,7 @@ fn nakamoto_attempt_time() {
     // mine the interim blocks
     for tenure_count in 0..inter_blocks_per_tenure {
-        debug!("nakamoto_attempt_time: begin tenure {}", tenure_count);
+        debug!("nakamoto_attempt_time: begin tenure {tenure_count}");
 
         let blocks_processed_before = coord_channel
             .lock()
@@ -5987,8 +5911,7 @@ fn nakamoto_attempt_time() {
                     break 'submit_txs;
                 }
                 info!(
-                    "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})",
-                    acct_idx, tx_count, tx_limit
+                    "nakamoto_times_ms: on account {acct_idx}; sent {tx_count} txs so far (out of {tx_limit})"
                 );
             }
             acct_idx += 1;
@@ -6056,13 +5979,10 @@ fn clarity_burn_state() {
     let tx_fee = 1000;
     let deploy_fee = 3000;
     naka_conf.add_initial_balance(
-        PrincipalData::from(sender_addr.clone()).to_string(),
+        PrincipalData::from(sender_addr).to_string(),
         deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure,
     );
-    naka_conf.add_initial_balance(
-        PrincipalData::from(sender_signer_addr.clone()).to_string(),
-        100000,
-    );
+    naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000);
     let stacker_sk = setup_stacker(&mut naka_conf);
 
     test_observer::spawn();
@@ -6139,7 +6059,7 @@ fn clarity_burn_state() {
     // Mine `tenure_count` nakamoto tenures
     for tenure_ix in 0..tenure_count {
-        info!("Mining tenure {}", tenure_ix);
+        info!("Mining tenure {tenure_ix}");
 
         // Don't submit this tx on the first iteration, because the contract is not published yet.
if tenure_ix > 0 { @@ -6200,7 +6120,7 @@ fn clarity_burn_state() { let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; - info!("Expecting burn block height to be {}", burn_block_height); + info!("Expecting burn block height to be {burn_block_height}"); // Assert that the contract call was successful test_observer::get_mined_nakamoto_blocks() @@ -6215,11 +6135,11 @@ fn clarity_burn_state() { return; } - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6241,7 +6161,7 @@ fn clarity_burn_state() { "foo", vec![&expected_height], ); - info!("Read-only result: {:?}", result); + info!("Read-only result: {result:?}"); result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block @@ -6277,11 +6197,11 @@ fn clarity_burn_state() { .iter() .for_each(|event| match event { TransactionEvent::Success(TransactionSuccessEvent { result, .. }) => { - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6307,6 +6227,7 @@ fn clarity_burn_state() { #[test] #[ignore] +#[allow(clippy::drop_non_drop)] fn signer_chainstate() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -6314,7 +6235,7 @@ fn signer_chainstate() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -6324,15 +6245,12 @@ fn signer_chainstate() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -6388,7 +6306,7 @@ fn signer_chainstate() { .unwrap() .unwrap() .stacks_block_height; - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -6450,6 +6368,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6588,6 +6507,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: 
Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6622,10 +6542,10 @@ fn signer_chainstate() { valid: Some(true), signed_over: true, proposed_time: get_epoch_time_secs(), - signed_self: None, - signed_group: None, + signed_self: Some(get_epoch_time_secs()), + signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, - state: BlockState::Unprocessed, + state: BlockState::GloballyAccepted, }) .unwrap(); @@ -6639,13 +6559,13 @@ fn signer_chainstate() { // Case: the block doesn't confirm the prior blocks that have been signed. let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); let last_tenure_header = &last_tenure.header; - let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_sk = naka_conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: last_tenure_header.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6665,6 +6585,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) @@ -6693,8 +6614,8 @@ fn signer_chainstate() { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), - parent_block_id: last_tenure_header.parent_block_id.clone(), + consensus_hash: last_tenure_header.consensus_hash, + parent_block_id: last_tenure_header.parent_block_id, tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), timestamp: last_tenure_header.timestamp + 1, @@ -6751,7 +6672,7 @@ fn signer_chainstate() { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6782,9 +6703,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6812,12 +6733,12 @@ 
fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash; let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6848,9 +6769,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6889,7 +6810,7 @@ fn signer_chainstate() { // every step of the return should be linked to the parent let mut prior: Option<&TenureForkingInfo> = None; for step in fork_info.iter().rev() { - if let Some(ref prior) = prior { + if let Some(prior) = prior { assert_eq!(prior.sortition_id, step.parent_sortition_id); } prior = Some(step); @@ -6928,7 +6849,7 @@ fn continue_tenure_extend() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let http_origin = naka_conf.node.data_url.clone(); @@ -6938,15 +6859,12 @@ fn continue_tenure_extend() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut transfer_nonce = 0; @@ -7005,7 +6923,7 @@ fn continue_tenure_extend() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7054,7 +6972,7 @@ fn continue_tenure_extend() { .get_stacks_blocks_processed(); info!("Pausing commit ops to 
trigger a tenure extend."); - test_skip_commit_op.0.lock().unwrap().replace(true); + test_skip_commit_op.set(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -7153,7 +7071,7 @@ fn continue_tenure_extend() { } info!("Resuming commit ops to mine regular tenures."); - test_skip_commit_op.0.lock().unwrap().replace(false); + test_skip_commit_op.set(false); // Mine 15 more regular nakamoto tenures for _i in 0..15 { @@ -7194,7 +7112,7 @@ fn continue_tenure_extend() { let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == &transfer_tx_hex { + if raw_tx == transfer_tx_hex { transfer_tx_included = true; continue; } @@ -7203,8 +7121,9 @@ fn continue_tenure_extend() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => match payload.cause { + + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { has_extend = true; tenure_extends.push(parsed); @@ -7215,9 +7134,8 @@ fn continue_tenure_extend() { } tenure_block_founds.push(parsed); } - }, - _ => {} - }; + }; + } } last_block_had_extend = has_extend; } @@ -7242,7 +7160,7 @@ fn continue_tenure_extend() { // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7280,8 +7198,8 @@ fn get_block_times( info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7294,8 +7212,8 @@ fn get_block_times( .unwrap(); let time_now0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-last-time", vec![], @@ -7308,8 +7226,8 @@ fn get_block_times( .unwrap(); let time1_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7322,8 +7240,8 @@ fn get_block_times( .unwrap(); let time1_now_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-last-time", vec![], @@ -7336,8 +7254,8 @@ fn get_block_times( .unwrap(); let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-tenure-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7350,8 +7268,8 @@ fn get_block_times( .unwrap(); let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-block-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7364,8 +7282,8 @@ fn get_block_times( .unwrap(); let time3_now_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-last-tenure-time", vec![], @@ -7432,13 +7350,10 @@ fn check_block_times() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 12, ); - 
naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -7528,7 +7443,7 @@ fn check_block_times() { .unwrap() .expect_u128() .unwrap(); - info!("Time from pre-epoch 3.0: {}", time0); + info!("Time from pre-epoch 3.0: {time0}"); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -7833,13 +7748,10 @@ fn check_block_info() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let contract3_name = "test-contract-3"; @@ -7971,7 +7883,7 @@ fn check_block_info() { blind_signer(&naka_conf, &signers, proposals_submitted); let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); + info!("Info from pre-epoch 3.0: {c0_block_ht_1_pre_3:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8039,7 +7951,7 @@ fn check_block_info() { // one in the tenure) let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = @@ -8062,7 +7974,7 @@ fn check_block_info() { .unwrap(); let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let cur_stacks_block_height = info.stacks_tip_height as u128; let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let cur_tenure_height: u128 = @@ -8381,7 +8293,7 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } -fn get_expected_reward_for_height(blocks: &Vec, block_height: u128) -> u128 { +fn get_expected_reward_for_height(blocks: &[serde_json::Value], block_height: u128) -> u128 { // Find the target block let target_block = blocks .iter() @@ -8468,13 +8380,10 @@ fn check_block_info_rewards() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -8560,7 +8469,7 @@ fn check_block_info_rewards() { blind_signer(&naka_conf, &signers, proposals_submitted); let tuple0 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", tuple0); + info!("Info from pre-epoch 3.0: {tuple0:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8672,7 +8581,7 @@ fn 
check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -8705,7 +8614,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); @@ -8797,7 +8706,7 @@ fn mock_mining() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 3; let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers @@ -8819,13 +8728,10 @@ fn mock_mining() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -9009,18 +8915,22 @@ fn mock_mining() { Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) > follower_naka_mined_blocks_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); wait_for(20, || { Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -9046,9 +8956,7 @@ fn mock_mining() { let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( blocks_mock_mined >= tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. Expected = {}", - blocks_mock_mined, - tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {blocks_mock_mined}. 
Expected = {tenure_count}" ); // wait for follower to reach the chain tip @@ -9093,8 +9001,8 @@ fn utxo_check_on_startup_panic() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9169,8 +9077,8 @@ fn utxo_check_on_startup_recover() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9244,10 +9152,10 @@ fn v3_signer_api_endpoint() { let send_amt = 100; let send_fee = 180; conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); - conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events @@ -9273,7 +9181,7 @@ fn v3_signer_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -9407,7 +9315,7 @@ fn skip_mining_long_tx() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.miner.nakamoto_attempt_time_ms = 5_000; @@ -9419,20 +9327,14 @@ fn skip_mining_long_tx() { let send_amt = 1000; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_1_addr.clone()).to_string(), + PrincipalData::from(sender_1_addr).to_string(), send_amt * 15 + send_fee * 15, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_2_addr.clone()).to_string(), - 10000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_2_addr).to_string(), 10000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -9488,7 +9390,7 @@ fn skip_mining_long_tx() { wait_for_first_naka_block_commit(60, &commits_submitted); // 
submit a long running TX and the transfer TX - let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); + let input_list: Vec<_> = (1..100u64).map(|x| x.to_string()).collect(); let input_list = input_list.join(" "); // Mine a few nakamoto tenures with some interim blocks in them diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d6373a3b44..167a66f7db 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -158,7 +158,7 @@ fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAdd .unwrap() .burnchain .magic_bytes; - assert_eq!(magic_bytes.as_bytes(), &['T' as u8, '2' as u8]); + assert_eq!(magic_bytes.as_bytes(), b"T2"); conf.burnchain.magic_bytes = magic_bytes; conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; @@ -391,7 +391,7 @@ pub mod test_observer { let new_rawtxs = txs .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let mut memtxs = MEMTXS.lock().unwrap(); for new_tx in new_rawtxs { @@ -408,7 +408,7 @@ pub mod test_observer { .unwrap() .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let reason = txs.get("reason").unwrap().as_str().unwrap().to_string(); @@ -622,8 +622,7 @@ pub mod test_observer { // Find indexes in range for which we don't have burn block in set let missing = (start..=end) - .into_iter() - .filter(|i| !burn_block_heights.contains(&i)) + .filter(|i| !burn_block_heights.contains(i)) .collect::>(); if missing.is_empty() { @@ -664,9 +663,8 @@ pub fn next_block_and_wait_with_timeout( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); info!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -693,9 +691,8 @@ pub fn next_block_and_iterate( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); eprintln!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -725,20 +722,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", get_epoch_time_secs(), - current_height ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let Ok(tip_info) = get_chain_info_result(&conf) else { + let Ok(tip_info) = get_chain_info_result(conf) else { sleep_ms(1000); continue; }; @@ -764,15 +760,12 @@ pub fn wait_for_runloop(blocks_processed: &Arc<AtomicU64>) { pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64) -> bool { let mut current = microblocks_processed.load(Ordering::SeqCst); let start = Instant::now(); - info!("Waiting for next microblock (current = {})", &current); + 
info!("Waiting for next microblock (current = {current})"); loop { let now = microblocks_processed.load(Ordering::SeqCst); if now == 0 && current != 0 { // wrapped around -- a new epoch started - info!( - "New microblock epoch started while waiting (originally {})", - current - ); + info!("New microblock epoch started while waiting (originally {current})"); current = 0; } @@ -781,24 +774,24 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc, timeout: u64 } if start.elapsed() > Duration::from_secs(timeout) { - warn!("Timed out waiting for microblocks to process ({})", timeout); + warn!("Timed out waiting for microblocks to process ({timeout})"); return false; } thread::sleep(Duration::from_millis(100)); } info!("Next microblock acknowledged"); - return true; + true } /// returns Txid string upon success -pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result { +pub fn submit_tx_fallible(http_origin: &str, tx: &[u8]) -> Result { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(tx.clone()) + .body(tx.to_vec()) .send() .unwrap(); if res.status().is_success() { @@ -817,16 +810,16 @@ pub fn submit_tx_fallible(http_origin: &str, tx: &Vec) -> Result) -> String { +pub fn submit_tx(http_origin: &str, tx: &[u8]) -> String { submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { - eprintln!("Submit tx error: {}", e); + eprintln!("Submit tx error: {e}"); panic!(""); }) } pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); + let path = format!("{http_origin}/v2/transactions/unconfirmed/{txid}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -840,14 +833,14 @@ pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option { pub fn submit_block( http_origin: &str, consensus_hash: &ConsensusHash, - block: &Vec, + block: &[u8], ) -> StacksBlockAcceptedData { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/upload/{}", http_origin, consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{consensus_hash}"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(block.clone()) + .body(block.to_owned()) .send() .unwrap(); @@ -862,21 +855,21 @@ pub fn submit_block( .block_hash() ) ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); } } -pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash { +pub fn submit_microblock(http_origin: &str, mblock: &[u8]) -> BlockHeaderHash { let client = reqwest::blocking::Client::new(); let microblock = StacksMicroblock::consensus_deserialize(&mut &mblock[..]).unwrap(); - let path = format!("{}/v2/microblocks/{}", http_origin, microblock.block_hash()); + let path = format!("{http_origin}/v2/microblocks/{}", microblock.block_hash()); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(mblock.clone()) + .body(mblock.to_owned()) .send() .unwrap(); @@ -888,7 +881,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec) -> BlockHeaderHash .unwrap() .block_hash() ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -897,7 +890,7 @@ pub fn submit_microblock(http_origin: &str, mblock: 
&Vec) -> BlockHeaderHash pub fn get_block(http_origin: &str, block_id: &StacksBlockId) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", http_origin, block_id); + let path = format!("{http_origin}/v2/blocks/{block_id}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -939,7 +932,7 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { // get the associated anchored block let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", &http_origin, &stacks_id_tip); + let path = format!("{http_origin}/v2/blocks/{stacks_id_tip}"); let block_bytes = client.get(&path).send().unwrap().bytes().unwrap(); let block = StacksBlock::consensus_deserialize(&mut block_bytes.as_ref()).unwrap(); @@ -972,10 +965,7 @@ pub fn call_read_only( info!("Call read only: {contract}.{function}({args:?})"); - let path = format!( - "{http_origin}/v2/contracts/call-read/{}/{}/{}", - principal, contract, function - ); + let path = format!("{http_origin}/v2/contracts/call-read/{principal}/{contract}/{function}"); let serialized_args = args .iter() @@ -1005,14 +995,13 @@ fn find_microblock_privkey( let mut keychain = Keychain::default(conf.node.seed.clone()); for ix in 0..max_tries { // the first rotation occurs at 203. - let privk = - keychain.make_microblock_secret_key(203 + ix, &((203 + ix) as u64).to_be_bytes()); + let privk = keychain.make_microblock_secret_key(203 + ix, &(203 + ix).to_be_bytes()); let pubkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(&privk)); if pubkh == *pubkey_hash { return Some(privk); } } - return None; + None } /// Returns true iff `b` is within `0.1%` of `a`. @@ -1073,7 +1062,7 @@ fn bitcoind_integration_test() { // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1088,7 +1077,7 @@ fn bitcoind_integration_test() { .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) .collect(); assert!( - burn_blocks_with_burns.len() >= 1, + !burn_blocks_with_burns.is_empty(), "Burn block sortitions {} should be >= 1", burn_blocks_with_burns.len() ); @@ -1096,7 +1085,7 @@ fn bitcoind_integration_test() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1184,7 +1173,7 @@ fn confirm_unparsed_ongoing_ops() { bitcoin_regtest_controller::TEST_MAGIC_BYTES .lock() .unwrap() - .replace(['Z' as u8, 'Z' as u8]); + .replace([b'Z', b'Z']); // let's trigger another mining loop: this should create an invalid block commit. 
// this bitcoin block will contain the valid commit created before (so, a second stacks block) @@ -1209,7 +1198,7 @@ fn confirm_unparsed_ongoing_ops() { // query the miner's account nonce - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1305,9 +1294,9 @@ fn most_recent_utxo_integration_test() { let smallest_utxo = smallest_utxo.unwrap(); let mut biggest_utxo = biggest_utxo.unwrap(); - eprintln!("Last-spent UTXO is {:?}", &last_utxo); - eprintln!("Smallest UTXO is {:?}", &smallest_utxo); - eprintln!("Biggest UTXO is {:?}", &biggest_utxo); + eprintln!("Last-spent UTXO is {last_utxo:?}"); + eprintln!("Smallest UTXO is {smallest_utxo:?}"); + eprintln!("Biggest UTXO is {biggest_utxo:?}"); assert_eq!(last_utxo, smallest_utxo); assert_ne!(biggest_utxo, last_utxo); @@ -1354,9 +1343,9 @@ pub fn get_account_result( account: &F, ) -> Result { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account); + let path = format!("{http_origin}/v2/accounts/{account}?proof=0"); let res = client.get(&path).send()?.json::()?; - info!("Account response: {:#?}", res); + info!("Account response: {res:#?}"); Ok(Account { balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(), locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(), @@ -1371,19 +1360,19 @@ pub fn get_account(http_origin: &str, account: &F) -> Acco pub fn get_neighbors(conf: &Config) -> Option { let client = reqwest::blocking::Client::new(); let http_origin = format!("http://{}", &conf.node.rpc_bind); - let path = format!("{}/v2/neighbors", http_origin); + let path = format!("{http_origin}/v2/neighbors"); client.get(&path).send().ok()?.json().ok() } pub fn get_pox_info(http_origin: &str) -> Option { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/pox", http_origin); + let path = format!("{http_origin}/v2/pox"); client.get(&path).send().ok()?.json::().ok() } fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1404,7 +1393,7 @@ fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) { fn get_chain_tip_height(http_origin: &str) -> u64 { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/info", http_origin); + let path = format!("{http_origin}/v2/info"); let res = client .get(&path) .send() @@ -1427,10 +1416,8 @@ pub fn get_contract_src( } else { "".to_string() }; - let path = format!( - "{}/v2/contracts/source/{}/{}{}", - http_origin, contract_addr, contract_name, query_string - ); + let path = + format!("{http_origin}/v2/contracts/source/{contract_addr}/{contract_name}{query_string}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -1719,7 +1706,7 @@ fn liquid_ustx_integration() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "ReplaceByFee"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", replaced_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{replaced_txid}")); // mine 1 burn block for the miner to issue the next block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1848,7 +1835,7 @@ fn lockup_integration() { } } } - assert_eq!(found, true); + 
assert!(found); // block #2 won't unlock STX next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1882,7 +1869,7 @@ fn stx_transfer_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -1892,7 +1879,7 @@ fn stx_transfer_btc_integration_test() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -1948,7 +1935,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -1975,8 +1962,8 @@ fn stx_transfer_btc_integration_test() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -1986,7 +1973,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -2017,7 +2004,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2046,8 +2033,8 @@ fn stx_transfer_btc_integration_test() { // let's fire off our transfer op. 
let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -2057,7 +2044,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -2111,7 +2098,7 @@ fn stx_delegate_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); @@ -2119,11 +2106,7 @@ fn stx_delegate_btc_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2132,7 +2115,7 @@ fn stx_delegate_btc_integration_test() { amount: 100300, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: 300, }); @@ -2226,7 +2209,7 @@ fn stx_delegate_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2252,8 +2235,8 @@ fn stx_delegate_btc_integration_test() { // let's fire off our delegate op. 
let del_stx_op = DelegateStxOp { - sender: spender_stx_addr.clone(), - delegate_to: recipient_addr.clone(), + sender: spender_stx_addr, + delegate_to: recipient_addr, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -2264,7 +2247,7 @@ fn stx_delegate_btc_integration_test() { until_burn_height: None, }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -2298,7 +2281,7 @@ fn stx_delegate_btc_integration_test() { Value::Principal(spender_addr.clone()), Value::UInt(100_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -2372,7 +2355,7 @@ fn stack_stx_burn_op_test() { let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr_1: StacksAddress = to_addr(&spender_sk_1); - let spender_addr_1: PrincipalData = spender_stx_addr_1.clone().into(); + let spender_addr_1: PrincipalData = spender_stx_addr_1.into(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); @@ -2390,7 +2373,7 @@ fn stack_stx_burn_op_test() { amount: first_bal, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: second_bal, }); @@ -2506,8 +2489,8 @@ fn stack_stx_burn_op_test() { info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - let signer_sk_1 = spender_sk_1.clone(); - let signer_sk_2 = spender_sk_2.clone(); + let signer_sk_1 = spender_sk_1; + let signer_sk_2 = spender_sk_2; let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); let pox_addr = PoxAddress::Standard(spender_stx_addr_1, Some(AddressHashMode::SerializeP2PKH)); @@ -2540,7 +2523,7 @@ fn stack_stx_burn_op_test() { let mut miner_signer_2 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_2 = PreStxOp { - output: spender_stx_addr_2.clone(), + output: spender_stx_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2619,13 +2602,13 @@ fn stack_stx_burn_op_test() { // `stacked_ustx` should be large enough to avoid ERR_STACKING_THRESHOLD_NOT_MET from Clarity let stack_stx_op_with_some_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_1.clone(), + sender: spender_stx_addr_1, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, signer_key: Some(signer_key), max_amount: Some(u128::MAX), - auth_id: Some(auth_id.into()), + auth_id: Some(auth_id), // to be filled in vtxindex: 0, txid: Txid([0u8; 32]), @@ -2633,7 +2616,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); + let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); assert!( btc_regtest_controller .submit_operation( @@ -2647,7 +2630,7 @@ fn stack_stx_burn_op_test() { ); let stack_stx_op_with_no_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_2.clone(), + sender: spender_stx_addr_2, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, @@ -2661,7 +2644,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut 
spender_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); assert!( btc_regtest_controller .submit_operation( @@ -2740,7 +2723,7 @@ fn stack_stx_burn_op_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -2775,17 +2758,13 @@ fn vote_for_aggregate_key_burn_op_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let _pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let _pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2912,7 +2891,7 @@ fn vote_for_aggregate_key_burn_op_test() { // setup stack-stx tx - let signer_sk = spender_sk.clone(); + let signer_sk = spender_sk; let signer_pk = StacksPublicKey::from_private(&signer_sk); let pox_addr = PoxAddress::Standard(spender_stx_addr, Some(AddressHashMode::SerializeP2PKH)); @@ -2959,7 +2938,7 @@ fn vote_for_aggregate_key_burn_op_test() { let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3012,7 +2991,7 @@ fn vote_for_aggregate_key_burn_op_test() { BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: spender_stx_addr.clone(), + sender: spender_stx_addr, round: 0, reward_cycle, aggregate_key, @@ -3023,7 +3002,7 @@ fn vote_for_aggregate_key_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3251,16 +3230,16 @@ fn bitcoind_forking_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); @@ -3355,17 +3334,17 @@ fn should_fix_2771() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = 
channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // okay, let's figure out the burn block we want to fork away. let reorg_height = 208; - warn!("Will trigger re-org at block {}", reorg_height); + warn!("Will trigger re-org at block {reorg_height}"); let burn_header_hash_to_fork = btc_regtest_controller.get_block_hash(reorg_height); btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork); btc_regtest_controller.build_next_block(1); @@ -3407,10 +3386,10 @@ fn make_signed_microblock( version: rng.gen(), sequence: seq, prev_block: parent_block, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }, - txs: txs, + txs, }; mblock.sign(block_privk).unwrap(); mblock @@ -3574,9 +3553,8 @@ fn microblock_fork_poison_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); // NOTE: this microblock conflicts because it has the same parent as the first microblock, @@ -3585,9 +3563,8 @@ fn microblock_fork_poison_integration_test() { make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); eprintln!( - "Created second conflicting microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second conflicting microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3598,7 +3575,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3616,7 +3593,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3737,7 +3714,7 @@ fn microblock_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -3854,9 +3831,8 @@ fn microblock_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); /* let second_microblock = @@ -3869,9 +3845,8 @@ fn microblock_integration_test() { 1, ); eprintln!( - "Created second microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3882,7 +3857,7 @@ fn microblock_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3894,7 +3869,7 @@ fn microblock_integration_test() { assert_eq!(res, format!("{}", 
&first_microblock.block_hash())); - eprintln!("\n\nBegin testing\nmicroblock: {:?}\n\n", &first_microblock); + eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 1); @@ -3906,7 +3881,7 @@ fn microblock_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -4037,7 +4012,7 @@ fn microblock_integration_test() { burn_blocks_with_burns.len() ); for burn_block in burn_blocks_with_burns { - eprintln!("{}", burn_block); + eprintln!("{burn_block}"); } let mut prior = None; @@ -4090,13 +4065,11 @@ fn microblock_integration_test() { // we can query unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); - eprintln!("{:?}", &path); + eprintln!("{path:?}"); let mut iter_count = 0; let res = loop { @@ -4107,7 +4080,7 @@ fn microblock_integration_test() { match http_resp.json::() { Ok(x) => break x, Err(e) => { - warn!("Failed to query {}; will try again. Err = {:?}", &path, e); + warn!("Failed to query {path}; will try again. Err = {e:?}"); iter_count += 1; assert!(iter_count < 10, "Retry limit reached querying account"); sleep_ms(1000); @@ -4116,17 +4089,14 @@ fn microblock_integration_test() { }; }; - info!("Account Response = {:#?}", res); + info!("Account Response = {res:#?}"); assert_eq!(res.nonce, 2); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); // limited by chaining for next_nonce in 2..5 { // verify that the microblock miner can automatically pick up transactions - debug!( - "Try to send unconfirmed tx from {} to {} nonce {}", - &spender_addr, &recipient, next_nonce - ); + debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( &spender_sk, next_nonce, @@ -4136,14 +4106,14 @@ fn microblock_integration_test() { 1000, ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(unconfirmed_tx_bytes.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -4153,7 +4123,7 @@ fn microblock_integration_test() { .txid() .to_string() ); - eprintln!("Sent {}", &res); + eprintln!("Sent {res}"); } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -4171,15 +4141,13 @@ fn microblock_integration_test() { // we can query _new_ unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); let res_text = client.get(&path).send().unwrap().text().unwrap(); - eprintln!("text of {}\n{}", &path, &res_text); + eprintln!("text of {path}\n{res_text}"); let res = client .get(&path) @@ -4187,8 +4155,8 @@ fn microblock_integration_test() { .unwrap() .json::() .unwrap(); - eprintln!("{:?}", &path); - eprintln!("{:#?}", res); + eprintln!("{path:?}"); + eprintln!("{res:#?}"); // 
advanced! assert_eq!(res.nonce, next_nonce + 1); @@ -4209,10 +4177,7 @@ fn filter_low_fee_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4232,7 +4197,7 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4242,7 +4207,7 @@ fn filter_low_fee_tx_integration_test() { } else { // high-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 2000 + (ix as u64), conf.burnchain.chain_id, @@ -4296,14 +4261,9 @@ fn filter_low_fee_tx_integration_test() { // First five accounts have a transaction. The miner will consider low fee transactions, // but rank by estimated fee rate. - for i in 0..5 { - let account = get_account(&http_origin, &spender_addrs[i]); - assert_eq!(account.nonce, 1); - } - - // last five accounts have transaction - for i in 5..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + // Last five accounts have a transaction + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, spender_addr); assert_eq!(account.nonce, 1); } @@ -4317,10 +4277,7 @@ fn filter_long_runtime_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4341,7 +4298,7 @@ fn filter_long_runtime_tx_integration_test() { .map(|(ix, spender_sk)| { let recipient = StacksAddress::from_string(ADDR_4).unwrap(); make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4393,8 +4350,8 @@ fn filter_long_runtime_tx_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // no transactions mined - for i in 0..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 0); } @@ -4517,10 +4474,7 @@ fn size_check_integration_test() { giant_contract.push(' '); } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec<PrincipalData> = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4652,13 +4606,10 @@ fn size_check_integration_test() { panic!("Spender address nonce incremented past 1"); } - debug!("Spender {},{}: {:?}", ix, &spender_addr, &res); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - eprintln!( - "anchor_block_txs: {}, micro_block_txs: {}", - anchor_block_txs, micro_block_txs - ); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); if anchor_block_txs >= 2 && micro_block_txs >= 2 { break; @@ -4693,10 +4644,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = 
(0..5).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4723,7 +4671,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { i as u64, 1100000, conf.burnchain.chain_id, - &format!("small-{}", i), + &format!("small-{i}"), &small_contract, ); ret.push(tx); @@ -4849,10 +4797,10 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_per_block += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -4868,8 +4816,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}, total_big_txs_per_block: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, max_big_txs_per_block, total_big_txs_per_block, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert!(max_big_txs_per_block > 0); @@ -4902,10 +4849,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..20) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4930,15 +4874,14 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let txs: Vec<_> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 600000, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5049,7 +4992,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5061,8 +5004,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 5); @@ -5090,10 +5032,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..25) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5115,15 +5054,14 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let txs: Vec> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 1149230, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5222,7 +5160,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5234,8 +5172,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 3); @@ -5252,13 +5189,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { return; } - let spender_sks: Vec<_> = (0..4) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let spender_addrs_c32: Vec = - spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_addrs_c32: Vec = spender_sks.iter().map(to_addr).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5292,7 +5225,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5334,9 +5267,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-contract-{}-{ix}\")) ", - &format!("large-contract-{}-{}", &spender_addrs_c32[ix], &ix) + &spender_addrs_c32[ix] ) )] } else { @@ -5347,7 +5280,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { i as u64, 210000, conf.burnchain.chain_id, - 
&format!("small-{}-{}", ix, i), + &format!("small-{ix}-{i}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5389,8 +5322,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) - ", &format!("small-contract-{}-{}-{}", &spender_addrs_c32[ix], &ix, i)) + (crash-me \"small-contract-{}-{ix}-{i}\")) + ", spender_addrs_c32[ix]) ); ret.push(tx); } @@ -5486,7 +5419,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let mut total_big_txs_in_microblocks = 0; for block in blocks { - eprintln!("block {:?}", &block); + eprintln!("block {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); let mut num_big_anchored_txs = 0; @@ -5499,12 +5432,12 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {:?}", &parsed); + eprintln!("tx: {parsed:?}"); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_in_blocks += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_in_microblocks += 1; } @@ -5520,12 +5453,10 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } info!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}", - max_big_txs_per_microblock, max_big_txs_per_block + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}" ); info!( - "total_big_txs_in_microblocks: {}, total_big_txs_in_blocks: {}", - total_big_txs_in_microblocks, total_big_txs_in_blocks + "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}" ); // at most one big tx per block and at most one big tx per stream, always. 
@@ -5605,7 +5536,7 @@ fn block_replay_integration_test() { // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -5638,7 +5569,7 @@ fn block_replay_integration_test() { tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); for i in 0..1024 { - let path = format!("{}/v2/blocks/upload/{}", &http_origin, &tip_consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); let res_text = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -5648,7 +5579,7 @@ fn block_replay_integration_test() { .text() .unwrap(); - eprintln!("{}: text of {}\n{}", i, &path, &res_text); + eprintln!("{i}: text of {path}\n{res_text}"); } test_observer::clear(); @@ -6022,11 +5953,11 @@ fn mining_events_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); conf.initial_balances.push(InitialBalance { - address: addr_2.clone().into(), + address: addr_2.into(), amount: 10000000, }); @@ -6121,7 +6052,7 @@ fn mining_events_integration_test() { // check mined microblock events let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(mined_microblock_events.len() >= 1); + assert!(!mined_microblock_events.is_empty()); // check tx events in the first microblock // 1 success: 1 contract publish, 2 error (on chain transactions) @@ -6136,15 +6067,12 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &620000); assert_eq!( execution_cost, @@ -6176,15 +6104,12 @@ fn mining_events_integration_test() { txid.to_string(), "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" ); - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); } _ => panic!("unexpected event type"), } @@ -6197,15 +6122,12 @@ fn mining_events_integration_test() { execution_cost, .. 
}) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &600000); assert_eq!( execution_cost, @@ -6304,7 +6226,7 @@ fn block_limit_hit_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6432,8 +6354,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_third_block.len(), 3); let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_fourth_block = mined_block_events[4] .get("transactions") @@ -6443,8 +6365,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_fourth_block.len(), 3); let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6516,7 +6438,7 @@ fn microblock_limit_hit_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6656,10 +6578,7 @@ fn microblock_limit_hit_integration_test() { let txid_3 = submit_tx(&http_origin, &tx_3); let txid_4 = submit_tx(&http_origin, &tx_4); - eprintln!( - "transactions: {},{},{},{}", - &txid_1, &txid_2, &txid_3, &txid_4 - ); + eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}"); sleep_ms(50_000); @@ -6702,8 +6621,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_first_mblock.len(), 2); let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_second_mblock = mined_mblock_events[1] .get("transactions") @@ -6713,8 +6632,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_second_mblock.len(), 2); let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6761,7 +6680,7 @@ fn block_large_tx_integration_test() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000, }); @@ -6837,10 +6756,7 @@ fn block_large_tx_integration_test() { let 
normal_txid = submit_tx(&http_origin, &tx); let huge_txid = submit_tx(&http_origin, &tx_2); - eprintln!( - "Try to mine a too-big tx. Normal = {}, TooBig = {}", - &normal_txid, &huge_txid - ); + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); eprintln!("Finished trying to mine a too-big tx"); @@ -6848,7 +6764,7 @@ fn block_large_tx_integration_test() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6898,7 +6814,7 @@ fn microblock_large_tx_integration_test_FLAKY() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); @@ -6981,7 +6897,7 @@ fn microblock_large_tx_integration_test_FLAKY() { // Check that the microblock contains the first tx. let microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 1); + assert!(!microblock_events.is_empty()); let microblock = microblock_events[0].clone(); let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -6994,7 +6910,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -7020,18 +6936,10 @@ fn pox_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); - let pox_2_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_2_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); let pox_2_address = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Testnet, @@ -7145,15 +7053,12 @@ fn pox_integration_test() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); @@ -7191,7 +7096,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, 
version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7205,14 +7110,14 @@ fn pox_integration_test() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // now let's mine until the next reward cycle starts ... while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -7220,16 +7125,13 @@ fn pox_integration_test() { .block_height_to_reward_cycle(sort_height) .expect("Expected to be able to get reward cycle"); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7281,8 +7183,7 @@ fn pox_integration_test() { // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", - &spender_addr)); + format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); tested = true; } } @@ -7307,7 +7208,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7331,7 +7232,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7348,20 +7249,17 @@ fn pox_integration_test() { while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + 
assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7403,19 +7301,16 @@ fn pox_integration_test() { while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); @@ -7472,11 +7367,11 @@ fn pox_integration_test() { assert_eq!(recipient_slots.len(), 2); assert_eq!( - recipient_slots.get(&format!("{}", &pox_2_address)).cloned(), + recipient_slots.get(&format!("{pox_2_address}")).cloned(), Some(7u64) ); assert_eq!( - recipient_slots.get(&format!("{}", &pox_1_address)).cloned(), + recipient_slots.get(&format!("{pox_1_address}")).cloned(), Some(7u64) ); @@ -7490,7 +7385,7 @@ fn pox_integration_test() { while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // get the canonical chain tip @@ -7513,7 +7408,7 @@ fn pox_integration_test() { while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); @@ -7661,7 +7556,7 @@ fn atlas_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -7677,14 +7572,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7759,14 +7654,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_2.clone()) .send() .unwrap(); - 
eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7810,14 +7705,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -7830,7 +7725,7 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Then check that the follower is correctly replicating the attachment @@ -7852,27 +7747,23 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -7944,20 +7835,16 @@ fn atlas_integration_test() { // Now wait for the node to sync the attachment let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = "facade00"; let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { eprintln!("Success syncing attachment - {}", res.text().unwrap()); attachments_did_sync = true; @@ -7966,7 +7853,7 @@ fn atlas_integration_test() { if timeout == 0 { panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); } - eprintln!("Attachment {} not sync'd yet", zonefile_hex); + eprintln!("Attachment {zonefile_hex} not sync'd yet"); thread::sleep(Duration::from_millis(1000)); } } @@ -7980,9 +7867,9 @@ fn atlas_integration_test() { let namespace = "passport"; for i in 1..10 { let user = StacksPrivateKey::new(); - let zonefile_hex = format!("facade0{}", i); + let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = 
format!("johndoe{}", i); + let name = format!("johndoe{i}"); let tx = make_contract_call( &user_1, 2 + i, @@ -8007,14 +7894,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8040,20 +7927,16 @@ fn atlas_integration_test() { for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -8072,7 +7955,7 @@ fn atlas_integration_test() { // Ensure that the attached sidecar was able to receive a total of 10 attachments // This last assertion is flaky for some reason; it is not worth bullying the CI or disabling this whole test // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(test_observer::get_attachments().len() > 0); + assert!(!test_observer::get_attachments().is_empty()); test_observer::clear(); channel.stop_chains_coordinator(); @@ -8122,8 +8005,8 @@ fn antientropy_integration_test() { // Prepare the config of the follower node let (mut conf_follower_node, _) = neon_integration_test_conf(); let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + "{bootstrap_node_public_key}@{}", conf_bootstrap_node.node.p2p_bind ); conf_follower_node.connection_options.disable_block_download = true; conf_follower_node.node.set_bootstrap_nodes( @@ -8195,10 +8078,10 @@ fn antientropy_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); for i in 0..(target_height - 3) { - eprintln!("Mine block {}", i); + eprintln!("Mine block {i}"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Let's setup the follower now. 
@@ -8214,11 +8097,11 @@ fn antientropy_integration_test() { println!("Follower has finished"); } Ok(x) => { - println!("Follower gave a bad signal: {:?}", &x); + println!("Follower gave a bad signal: {x:?}"); panic!(); } Err(e) => { - println!("Failed to recv: {:?}", &e); + println!("Failed to recv: {e:?}"); panic!(); } }; @@ -8255,8 +8138,7 @@ fn antientropy_integration_test() { let mut sort_height = channel.get_sortitions_processed(); while sort_height < (target_height + 200) as u64 { eprintln!( - "Follower sortition is {}, target is {}", - sort_height, + "Follower sortition is {sort_height}, target is {}", target_height + 200 ); wait_for_runloop(&blocks_processed); @@ -8269,8 +8151,7 @@ fn antientropy_integration_test() { // wait for block height to reach target let mut tip_height = get_chain_tip_height(&http_origin); eprintln!( - "Follower Stacks tip height is {}, wait until {} >= {} - 3", - tip_height, tip_height, target_height + "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3" ); let btc_regtest_controller = BitcoinRegtestController::with_burnchain( @@ -8285,7 +8166,7 @@ fn antientropy_integration_test() { sleep_ms(1000); tip_height = get_chain_tip_height(&http_origin); - eprintln!("Follower Stacks tip height is {}", tip_height); + eprintln!("Follower Stacks tip height is {tip_height}"); if burnchain_deadline < get_epoch_time_secs() { burnchain_deadline = get_epoch_time_secs() + 60; @@ -8304,12 +8185,13 @@ fn antientropy_integration_test() { channel.stop_chains_coordinator(); } +#[allow(clippy::too_many_arguments)] fn wait_for_mined( btc_regtest_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, http_origin: &str, users: &[StacksPrivateKey], - account_before_nonces: &Vec, + account_before_nonces: &[u64], batch_size: usize, batches: usize, index_block_hashes: &mut Vec, @@ -8318,7 +8200,7 @@ fn wait_for_mined( let mut account_after_nonces = vec![0; batches * batch_size]; let mut all_mined = false; for _k in 0..10 { - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); sleep_ms(10_000); let (ch, bhh) = get_chain_tip(http_origin); @@ -8327,29 +8209,28 @@ fn wait_for_mined( if let Some(last_ibh) = index_block_hashes.last() { if *last_ibh != ibh { index_block_hashes.push(ibh); - eprintln!("Tip is now {}", &ibh); + eprintln!("Tip is now {ibh}"); } } for j in 0..batches * batch_size { - let account_after = get_account(&http_origin, &to_addr(&users[j])); + let account_after = get_account(http_origin, &to_addr(&users[j])); let account_after_nonce = account_after.nonce; account_after_nonces[j] = account_after_nonce; - if account_before_nonces[j] + 1 <= account_after_nonce { + if account_before_nonces[j] < account_after_nonce { all_mined_vec[j] = true; } } - all_mined = all_mined_vec.iter().fold(true, |acc, elem| acc && *elem); + all_mined = all_mined_vec.iter().all(|elem| *elem); if all_mined { break; } } if !all_mined { eprintln!( - "Failed to mine all transactions: nonces = {:?}, expected {:?} + {}", - &account_after_nonces, account_before_nonces, batch_size + "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}" ); panic!(); } @@ -8450,7 +8331,7 @@ fn atlas_stress_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = 
Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -8466,14 +8347,14 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -8548,7 +8429,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8626,14 +8507,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8660,8 +8541,7 @@ fn atlas_stress_integration_test() { } if !all_mined { eprintln!( - "Failed to mine all transactions: nonce = {}, expected {}", - account_after_nonce, + "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", account_before.nonce + (batch_size as u64) ); panic!(); @@ -8682,14 +8562,14 @@ fn atlas_stress_integration_test() { &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_4.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8723,7 +8603,7 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let fqn = format!("janedoe{}.passport", j); + let fqn = format!("janedoe{j}.passport"); let fqn_bytes = fqn.as_bytes().to_vec(); let salt = format!("{:04x}", j); let salt_bytes = salt.as_bytes().to_vec(); @@ -8746,7 +8626,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8755,9 +8635,8 @@ fn atlas_stress_integration_test() { .unwrap(); eprintln!( - "sent preorder for {}:\n{:#?}", - &to_addr(&users[batches * batch_size + j]), - res + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) ); if !res.status().is_success() { panic!(""); @@ -8784,10 +8663,10 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let salt = format!("{:04x}", j); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - let zonefile_hex = format!("facade01{:04x}", j); + let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8816,14 +8695,14 
@@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8850,8 +8729,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade02{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8879,14 +8758,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8913,8 +8792,8 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let zonefile_hex = format!("facade03{:04x}", j); + let name = format!("janedoe{j}"); + let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8945,14 +8824,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8984,8 +8863,8 @@ fn atlas_stress_integration_test() { &[ibh], ) .unwrap(); - if indexes.len() > 0 { - attachment_indexes.insert(ibh.clone(), indexes.clone()); + if !indexes.is_empty() { + attachment_indexes.insert(*ibh, indexes.clone()); } for index in indexes.iter() { @@ -8995,14 +8874,14 @@ fn atlas_stress_integration_test() { params![ibh, u64_to_sql(*index).unwrap()], "content_hash") .unwrap(); - if hashes.len() > 0 { + if !hashes.is_empty() { assert_eq!(hashes.len(), 1); - attachment_hashes.insert((ibh.clone(), *index), hashes.pop()); + attachment_hashes.insert((*ibh, *index), hashes.pop()); } } } } - eprintln!("attachment_indexes = {:?}", &attachment_indexes); + eprintln!("attachment_indexes = {attachment_indexes:?}"); let max_request_time_ms = 100; @@ -9017,12 +8896,10 @@ fn atlas_stress_integration_test() { ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)] .to_vec(); let path = format!( - "{}/v2/attachments/inv?index_block_hash={}&pages_indexes={}", - &http_origin, - ibh, + "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}", attachments_batch .iter() - .map(|a| format!("{}", &a)) + .map(|a| format!("{a}")) .collect::>() .join(",") ); @@ -9034,40 +8911,34 @@ fn 
atlas_stress_integration_test() { if res.status().is_success() { let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap(); - eprintln!( - "attachment inv response for {}: {:?}", - &path, &attachment_inv_response - ); + eprintln!("attachment inv response for {path}: {attachment_inv_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } - for i in 0..l { - if attachments[i] == 0 { + for attachment in attachments.iter().take(l) { + if *attachment == 0 { continue; } let content_hash = attachment_hashes - .get(&(*ibh, attachments[i])) + .get(&(*ibh, *attachment)) .cloned() .unwrap() .unwrap(); - let path = format!("{}/v2/attachments/{}", &http_origin, &content_hash); + let path = format!("{http_origin}/v2/attachments/{content_hash}"); let attempts = 10; let ts_begin = get_epoch_time_ms(); @@ -9076,26 +8947,20 @@ fn atlas_stress_integration_test() { if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!( - "attachment response for {}: {:?}", - &path, &attachment_response - ); + eprintln!("attachment response for {path}: {attachment_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } } @@ -9129,8 +8994,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value (unwrap! (increment) (err u1)) (unwrap! 
(increment) (err u1)) (ok (var-get counter)))) - "# - .to_string(); + "#; let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); @@ -9144,7 +9008,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value conf.estimation.fee_rate_window_size = window_size; conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000000, }); test_observer::spawn(); @@ -9181,7 +9045,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value 110000, conf.burnchain.chain_id, "increment-contract", - &max_contract_src, + max_contract_src, ), ); run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf); @@ -9198,7 +9062,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value i, // nonce i * 100000, // payment conf.burnchain.chain_id, - &spender_addr.into(), + &spender_addr, "increment-contract", "increment-many", &[], @@ -9213,12 +9077,12 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value { // Read from the fee estimation endpoint. - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { - address: spender_addr.clone().into(), - contract_name: ContractName::try_from("increment-contract").unwrap(), - function_name: ClarityName::try_from("increment-many").unwrap(), + address: spender_addr, + contract_name: ContractName::from("increment-contract"), + function_name: ClarityName::from("increment-many"), function_args: vec![], }); @@ -9255,8 +9119,8 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value let last_cost = response_estimated_costs[i - 1]; assert_eq!(curr_cost, last_cost); - let curr_rate = response_top_fee_rates[i] as f64; - let last_rate = response_top_fee_rates[i - 1] as f64; + let curr_rate = response_top_fee_rates[i]; + let last_rate = response_top_fee_rates[i - 1]; assert!(curr_rate >= last_rate); } @@ -9438,7 +9302,7 @@ fn use_latest_tip_integration_test() { let client = reqwest::blocking::Client::new(); // Post the microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -9452,7 +9316,7 @@ fn use_latest_tip_integration_test() { // Wait for the microblock to be accepted sleep_ms(5_000); - let path = format!("{}/v2/info", &http_origin); + let path = format!("{http_origin}/v2/info"); let mut iter_count = 0; loop { let tip_info = client @@ -9594,26 +9458,26 @@ fn test_flash_block_skip_tenure() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // fault injection: force tenures to take too long - std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string()); + std::env::set_var("STX_TEST_SLOW_TENURE", "11000"); for i in 0..10 { // build one bitcoin block every 10 seconds - eprintln!("Build bitcoin block +{}", i); + eprintln!("Build bitcoin block +{i}"); btc_regtest_controller.build_next_block(1); sleep_ms(10000); } // at least one tenure was skipped let num_skipped = missed_tenures.load(Ordering::SeqCst); - eprintln!("Skipped {} tenures", &num_skipped); + eprintln!("Skipped {num_skipped} tenures"); assert!(num_skipped > 1); // let's query the miner's account nonce: - eprintln!("Miner account: {}", 
miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); - eprintln!("account = {:?}", &account); + eprintln!("account = {account:?}"); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 2); @@ -9696,15 +9560,15 @@ fn test_problematic_txs_are_not_stored() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: spender_addr_1.clone(), + address: spender_addr_1, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_2.clone(), + address: spender_addr_2, amount: 1_000_000_000_000, }); conf.initial_balances.push(InitialBalance { - address: spender_addr_3.clone(), + address: spender_addr_3, amount: 1_000_000_000_000, }); @@ -9754,7 +9618,7 @@ fn test_problematic_txs_are_not_stored() { let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); let tx_edge = make_contract_publish( &spender_sk_1, @@ -9772,7 +9636,7 @@ fn test_problematic_txs_are_not_stored() { let exceeds_repeat_factor = edge_repeat_factor + 1; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -9790,7 +9654,7 @@ fn test_problematic_txs_are_not_stored() { let high_repeat_factor = 128 * 1024; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -9840,25 +9704,24 @@ fn test_problematic_txs_are_not_stored() { fn find_new_files(dirp: &str, prev_files: &HashSet) -> (Vec, HashSet) { let dirpp = Path::new(dirp); - debug!("readdir {}", dirp); + debug!("readdir {dirp}"); let cur_files = fs::read_dir(dirp).unwrap(); let mut new_files = vec![]; let mut cur_files_set = HashSet::new(); for cur_file in cur_files.into_iter() { let cur_file = cur_file.unwrap(); let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string(); - test_debug!("file in {}: {}", dirp, &cur_file_fullpath); + test_debug!("file in {dirp}: {cur_file_fullpath}"); cur_files_set.insert(cur_file_fullpath.clone()); if prev_files.contains(&cur_file_fullpath) { - test_debug!("already contains {}", &cur_file_fullpath); + test_debug!("already contains {cur_file_fullpath}"); continue; } - test_debug!("new file {}", &cur_file_fullpath); + test_debug!("new file {cur_file_fullpath}"); new_files.push(cur_file_fullpath); } debug!( - "Checked {} for new files; found {} (all: {})", - dirp, + "Checked {dirp} for new files; found {} (all: {})", new_files.len(), cur_files_set.len() ); @@ -9894,8 +9757,7 @@ fn spawn_follower_node( conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); - 
conf.burnchain.ast_precheck_size_height = - initial_conf.burnchain.ast_precheck_size_height.clone(); + conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; conf.connection_options.inv_sync_interval = 3; @@ -9923,12 +9785,12 @@ fn test_problematic_blocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10000,7 +9862,7 @@ fn test_problematic_blocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10018,7 +9880,7 @@ fn test_problematic_blocks_are_not_mined() { let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10054,20 +9916,11 @@ fn test_problematic_blocks_are_not_mined() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10116,7 +9969,7 @@ fn test_problematic_blocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10125,31 +9978,25 @@ fn test_problematic_blocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); btc_regtest_controller.build_next_block(1); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10162,7 +10009,7 @@ fn test_problematic_blocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10185,10 +10032,8 @@ fn test_problematic_blocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + 
assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no block contained the tx_high bad transaction, ever let blocks = test_observer::get_blocks(); @@ -10233,8 +10078,7 @@ fn test_problematic_blocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10278,12 +10122,12 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10355,7 +10199,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10372,7 +10216,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10408,20 +10252,11 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10470,7 +10305,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10484,14 +10319,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { loop { sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10510,7 +10345,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10521,23 +10356,17 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
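    // ("Non-conformant" here: the miner is still evaluating transactions under
    // ASTRules::Typical past the height where conforming nodes switch to the
    // stricter AST size precheck, so it will admit and mine this oversized tx --
    // a reading inferred from the ASTRules assertions around this point.)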
- debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10549,7 +10378,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10616,10 +10445,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { break; } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); sleep_ms(1000); } @@ -10627,8 +10454,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10648,8 +10474,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad block @@ -10669,12 +10495,12 @@ fn test_problematic_microblocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10752,7 +10578,7 @@ fn test_problematic_microblocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : 
".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -10767,11 +10593,10 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -10807,24 +10632,12 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10876,7 +10689,7 @@ fn test_problematic_microblocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10885,39 +10698,27 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_high transaction {}", - &tx_high_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!( - "Mined block after submitting problematic 
tx_high transaction {}", - &tx_high_txid - ); + info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10930,7 +10731,7 @@ fn test_problematic_microblocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some microblocks, and log problematic microblocks for _i in 0..6 { @@ -10956,10 +10757,8 @@ fn test_problematic_microblocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no microblock contained the tx_high bad transaction, ever let microblocks = test_observer::get_microblocks(); @@ -11004,8 +10803,7 @@ fn test_problematic_microblocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11049,12 +10847,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -11134,7 +10932,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -11149,11 +10947,10 @@ fn 
test_problematic_microblocks_are_not_relayed_or_stored() { .txid(); // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -11189,20 +10986,11 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -11254,7 +11042,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -11267,14 +11055,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11293,7 +11081,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11304,24 +11092,18 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. 
// because the miner is now non-conformant, it should mine this tx. - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic microblocks for _i in 0..6 { @@ -11333,7 +11115,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11356,7 +11138,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high - assert!(all_new_files.len() >= 1); + assert!(!all_new_files.is_empty()); // tx_high got mined by the miner let microblocks = test_observer::get_microblocks(); @@ -11381,8 +11163,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .split("0x") .collect(); let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{}'", &bad_block_id_hex); - Some(StacksBlockId::from_hex(&bad_block_id_hex).unwrap()) + debug!("bad_block_id_hex = '{bad_block_id_hex}'"); + Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) }; } } @@ -11420,8 +11202,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11441,8 +11222,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad microblock -- can't append subsequent blocks @@ -11570,9 +11351,8 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st let full_iters_code = full_iters_code_parts.join("\n "); - let iters_mod_code_parts: Vec = (0..iters_mod) - .map(|cnt| format!("0x{:0>2x}", cnt)) - .collect(); + let iters_mod_code_parts: Vec = + (0..iters_mod).map(|cnt| format!("0x{cnt:0>2x}")).collect(); 
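    // For illustration, assuming iters_mod = 3: the parts built above are
    // ["0x00", "0x01", "0x02"], so the next line produces the Clarity
    // literal "(list 0x00 0x01 0x02)".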
let iters_mod_code = format!("(list {})", iters_mod_code_parts.join(" ")); @@ -11599,7 +11379,7 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st (define-private (crash-me-folder (input (buff 1)) (ctr uint)) (begin ;; full_iters_code - {} + {full_iters_code} (+ u1 ctr) ) ) @@ -11608,20 +11388,17 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st ;; call index-of (iters_256 * 256) times (fold crash-me-folder BUFF_TO_BYTE u0) ;; call index-of iters_mod times - (fold crash-me-folder {} u0) + (fold crash-me-folder {iters_mod_code} u0) (print name) (ok u0) ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-{nonce}-{addr_prefix}-{num_index_of}\")) ", - full_iters_code, - iters_mod_code, - &format!("large-{}-{}-{}", nonce, &addr_prefix, num_index_of) ); - eprintln!("{}", &code); + eprintln!("{code}"); code } @@ -11636,13 +11413,14 @@ pub fn make_expensive_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; + let num_index_of = 256; for nonce in 0..25 { let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, 256); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{num_index_of}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11650,7 +11428,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) } else { make_contract_publish( @@ -11659,7 +11437,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) }; chain.push(tx); @@ -11673,7 +11451,7 @@ pub fn make_random_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11689,8 +11467,8 @@ pub fn make_random_tx_chain( let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11716,7 +11494,7 @@ pub fn make_random_tx_chain( } fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11732,8 +11510,8 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = make_contract_publish_microblock_only( privk, nonce, @@ -11758,10 +11536,7 @@ fn test_competing_miners_build_on_same_chain( return; } - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| 
StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11808,9 +11583,8 @@ fn test_competing_miners_build_on_same_chain( confs[i].node.set_bootstrap_nodes( format!( - "{}@{}", + "{}@{p2p_bind}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind ), chain_id, peer_version, @@ -11818,8 +11592,8 @@ fn test_competing_miners_build_on_same_chain( } // use long reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 100; let prepare_phase_len = 20; let pox_constants = PoxConstants::new( @@ -11856,10 +11630,10 @@ fn test_competing_miners_build_on_same_chain( btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -11879,8 +11653,8 @@ fn test_competing_miners_build_on_same_chain( let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -11888,7 +11662,7 @@ fn test_competing_miners_build_on_same_chain( loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 1: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 1: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -11898,23 +11672,19 @@ fn test_competing_miners_build_on_same_chain( next_block_and_wait(&mut btc_regtest_controller, &blocks_processed[0]); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -11938,7 +11708,7 @@ fn test_competing_miners_build_on_same_chain( let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -11948,7 +11718,7 @@ fn test_competing_miners_build_on_same_chain( // mine quickly -- see if we can induce flash blocks for i in 0..1000 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); } @@ -12023,10 +11793,7 @@ fn microblock_miner_multiple_attempts() { 
conf.burnchain.max_rbf = 1000000; conf.node.wait_time_for_blocks = 1_000; - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -12076,7 +11843,7 @@ fn microblock_miner_multiple_attempts() { // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {:?}", &account); + eprintln!("Miner account: {account:?}"); let all_txs: Vec<_> = privks .iter() @@ -12085,10 +11852,9 @@ fn microblock_miner_multiple_attempts() { .collect(); let _handle = thread::spawn(move || { - for txi in 0..all_txs.len() { - for j in 0..all_txs[txi].len() { - let tx = &all_txs[txi][j]; - eprintln!("\n\nSubmit tx {},{}\n\n", txi, j); + for (i, txi) in all_txs.iter().enumerate() { + for (j, tx) in txi.iter().enumerate() { + eprintln!("\n\nSubmit tx {i},{j}\n\n"); submit_tx(&http_origin, tx); sleep_ms(1_000); } @@ -12119,12 +11885,13 @@ fn min_txs() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.min_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.min_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); - if fs::metadata("/tmp/activate_vrf_key.min_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.min_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12176,18 +11943,18 @@ fn min_txs() { let _sort_height = channel.get_sortitions_processed(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); submit_tx(&http_origin, &publish); - debug!("Try to build too-small a block {}", &i); + debug!("Try to build too-small a block {i}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); } @@ -12195,12 +11962,12 @@ fn min_txs() { for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); if transactions.len() > 1 { - debug!("Got block: {:?}", &block); + debug!("Got block: {block:?}"); assert!(transactions.len() >= 4); } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.min_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12222,13 +11989,14 @@ fn filter_txs_by_type() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.filter_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.filter_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); conf.miner.txs_to_consider = [MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); - if fs::metadata("/tmp/activate_vrf_key.filter_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.filter_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); 
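    // i.e. 10 billion STX denominated in microSTX (10^16), assuming
    // core::MICROSTACKS_PER_STACKS == 1_000_000.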
@@ -12280,13 +12048,13 @@ fn filter_txs_by_type() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12298,7 +12066,7 @@ fn filter_txs_by_type() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12313,7 +12081,7 @@ fn filter_txs_by_type() { } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.filter_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12391,13 +12159,13 @@ fn filter_txs_by_origin() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12409,7 +12177,7 @@ fn filter_txs_by_origin() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12479,12 +12247,12 @@ fn bitcoin_reorg_flap() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // stop bitcoind and copy its DB to simulate a chain flap @@ -12496,7 +12264,7 @@ fn bitcoin_reorg_flap() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -12681,8 +12449,7 @@ fn bitcoin_reorg_flap_with_follower() { let mut miner_sort_height = miner_channel.get_sortitions_processed(); let mut follower_sort_height = follower_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); while miner_sort_height < 210 && follower_sort_height < 210 { @@ -12695,8 +12462,7 @@ fn bitcoin_reorg_flap_with_follower() { miner_sort_height = miner_channel.get_sortitions_processed(); 
follower_sort_height = miner_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); } @@ -12709,7 +12475,7 @@ fn bitcoin_reorg_flap_with_follower() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42b894398d..22f58291ff 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -123,10 +123,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest (), - G: FnMut(&mut NeonConfig) -> (), - >( + fn new_with_config_modifications( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, mut signer_config_modifier: F, @@ -151,8 +148,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); @@ -330,10 +326,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { - // Make sure that ALL signers accepted the block proposal + // Make sure that at least 70% of signers accepted the block proposal wait_for(timeout_secs, || { let signatures = test_observer::get_stackerdb_chunks() .into_iter() @@ -597,7 +585,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); - Ok(signatures.len() == expected_signers.len()) + Ok(signatures.len() > expected_signers.len() * 7 / 10) }) } @@ -634,7 +622,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( +fn setup_stx_btc_node( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], signer_configs: &[SignerConfig], diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index d0f3dfff83..a7ade631aa 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -66,14 +66,16 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::miner::{ + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, +}; use 
crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, + POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, @@ -118,13 +120,13 @@ impl SignerTest { for stacker_sk in self.signer_stacks_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -137,7 +139,7 @@ impl SignerTest { let signer_pk = StacksPublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, self.running_nodes.conf.burnchain.chain_id, @@ -246,7 +248,7 @@ impl SignerTest { .get_reward_set_signers(reward_cycle) .expect("Failed to check if reward set is calculated") .map(|reward_set| { - debug!("Signer set: {:?}", reward_set); + debug!("Signer set: {reward_set:?}"); }) .is_some()) }) @@ -304,10 +306,7 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. assert!(signature.len() >= num_signers * 7 / 10); - info!( - "Verifying signatures against signers for reward cycle {:?}", - reward_cycle - ); + info!("Verifying signatures against signers for reward cycle {reward_cycle:?}"); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -432,7 +431,8 @@ impl SignerTest { /// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined. /// An invalid block proposal is forcibly written to the miner's slot to simulate the miner proposing a block. /// The signers process the invalid block by first verifying it against the stacks node block proposal endpoint. -/// The signers then broadcast a rejection of the miner's proposed block back to the respective .signers-XXX-YYY contract. +/// The signer that submitted the initial block validation request, should issue a broadcast a rejection of the +/// miner's proposed block back to the respective .signers-XXX-YYY contract. /// /// Test Assertion: /// Each signer successfully rejects the invalid block proposal. 
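// Hedged sketch (hypothetical test, not part of the patch): the 7/10 signer
// thresholds used in the helpers above rely on integer division, so the cutoff
// rounds down. With 5 signers, 5 * 7 / 10 == 3, and `signatures.len() > 3`
// means 4 of the 5 signers must accept before the wait helper returns Ok(true).
#[test]
fn seventy_percent_threshold_sketch() {
    let num_signers: usize = 5;
    let cutoff = num_signers * 7 / 10; // 3, via integer division
    assert!(4 > cutoff); // four acceptances clear the 70% bar
    assert!(3 <= cutoff); // three acceptances do not
}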
@@ -456,6 +456,7 @@ fn block_proposal_rejection() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -789,7 +790,7 @@ fn reloads_signer_set_in() { let send_fee = 180; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_config| {}, |_| {}, None, @@ -848,7 +849,7 @@ fn reloads_signer_set_in() { } }; if let Some(ref set) = reward_set { - info!("Signer set: {:?}", set); + info!("Signer set: {set:?}"); } Ok(reward_set.is_some()) }) @@ -912,7 +913,7 @@ fn forked_tenure_testing( let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -1003,10 +1004,7 @@ fn forked_tenure_testing( signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -1030,7 +1028,7 @@ fn forked_tenure_testing( .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -1038,14 +1036,14 @@ fn forked_tenure_testing( let tip_b = StacksHeaderInfo { anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, - stacks_block_height: tip_b_block.header.chain_length.into(), + stacks_block_height: tip_b_block.header.chain_length, index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed - consensus_hash: tip_b_block.header.consensus_hash.clone(), - burn_header_hash: tip_sn.burn_header_hash.clone(), + consensus_hash: tip_b_block.header.consensus_hash, + burn_header_hash: tip_sn.burn_header_hash, burn_header_height: tip_sn.block_height as u32, burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, - burn_view: Some(tip_b_block.header.consensus_hash.clone()), + burn_view: Some(tip_b_block.header.consensus_hash), }; let blocks = test_observer::get_mined_nakamoto_blocks(); @@ -1083,10 +1081,7 @@ fn forked_tenure_testing( signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(false); + .set(false); let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { @@ -1227,10 +1222,8 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -1339,7 +1332,7 @@ fn bitcoind_forking_test() { let post_fork_1_nonce = 
get_account(&http_origin, &miner_address).nonce; - assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 2); for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); @@ -1466,7 +1459,7 @@ fn multiple_miners() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1504,7 +1497,7 @@ fn multiple_miners() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1525,7 +1518,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1583,10 +1576,7 @@ fn multiple_miners() { let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); - info!( - "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", - &info_1, &info_2 - ); + info!("Issue next block-build request\ninfo 1: {info_1:?}\ninfo 2: {info_2:?}\n"); signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], @@ -1597,10 +1587,8 @@ fn multiple_miners() { btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure - let consensus_hash_set: HashSet<_> = blocks - .iter() - .map(|header| header.consensus_hash.clone()) - .collect(); + let consensus_hash_set: HashSet<_> = + blocks.iter().map(|header| header.consensus_hash).collect(); assert_eq!( consensus_hash_set.len(), blocks.len(), @@ -1667,14 +1655,7 @@ fn get_nakamoto_headers(config: &Config) -> Vec { let nakamoto_block_ids: HashSet<_> = test_observer::get_blocks() .into_iter() .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } + block_json.as_object().unwrap().get("miner_signature")?; let block_id = StacksBlockId::from_hex( &block_json .as_object() @@ -1753,7 +1734,7 @@ fn miner_forking() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1779,6 +1760,7 @@ fn miner_forking() { config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.node.pox_sync_sample_secs = 30; config.burnchain.pox_reward_length = Some(max_sortitions as u32); + config.miner.block_commit_delay = Duration::from_secs(0); config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -1795,7 +1777,7 @@ fn miner_forking() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1816,7 +1798,7 @@ fn miner_forking() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = 
StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1826,8 +1808,8 @@ fn miner_forking() { let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let Counters { - naka_skip_commit_op, - naka_submitted_commits: second_miner_commits_submitted, + naka_skip_commit_op: skip_commit_op_rl2, + naka_submitted_commits: commits_submitted_rl2, .. } = run_loop_2.counters(); let _run_loop_2_thread = thread::Builder::new() @@ -1848,151 +1830,256 @@ fn miner_forking() { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let commits_submitted_rl1 = signer_test.running_nodes.commits_submitted.clone(); + let skip_commit_op_rl1 = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - naka_skip_commit_op.0.lock().unwrap().replace(false); + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; info!("------------------------- Reached Epoch 3.0 -------------------------"); - let mut sortitions_seen = Vec::new(); - let run_sortition = || { - info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); - TEST_BROADCAST_STALL.lock().unwrap().replace(true); + info!("Pausing both miners' block commit submissions"); + skip_commit_op_rl1.set(true); + skip_commit_op_rl2.set(true); - let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + info!("Flushing any pending commits to enable custom winner selection"); + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - naka_skip_commit_op.0.lock().unwrap().replace(false); + info!("------------------------- RL1 Wins Sortition -------------------------"); + info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // wait until a commit is submitted by run_loop_2 - wait_for(60, || { - let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > rl2_commits_before) - }) + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); + + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + info!("Pausing commits from 
RL1"); + skip_commit_op_rl1.set(true); + + let burn_height_before = get_burn_height(); + info!("Mine RL1 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL1 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_1, + "RL1 did not win the sortition" + ); + + info!( + "------------------------- RL2 Wins Sortition With Outdated View -------------------------" + ); + let rl2_commits_before = commits_submitted_rl2.load(Ordering::SeqCst); + + info!("Unpausing commits from RL2"); + skip_commit_op_rl2.set(false); + + info!("Waiting for commits from RL2"); + wait_for(30, || { + Ok(commits_submitted_rl2.load(Ordering::SeqCst) > rl2_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + info!("Pausing commits from RL2"); + skip_commit_op_rl2.set(true); + + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for the block to be broadcasted and processed + wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) + .expect("Timed out waiting for a block to be processed"); + + // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set + thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) + }) + .collect(); + + let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) .unwrap(); - // wait until a commit is submitted by run_loop_1 - wait_for(60, || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > rl1_commits_before) + + let blocks_len = test_observer::get_blocks().len(); + let burn_height_before = get_burn_height(); + info!("Mine RL2 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + // Ensure that RL2 doesn't produce a valid block + assert!( + wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)).is_err(), + "RL2 produced a block" + ); + + // fetch the current sortition info + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL2 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_2, + "RL2 did not win the sortition" + ); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) 
}) - .unwrap(); + .collect(); + assert!(!nakamoto_headers.contains_key(&tip.consensus_hash)); - // fetch the current sortition info - let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + info!("------------------------- RL1 RBFs its Own Commit -------------------------"); + info!("Pausing stacks block proposal to test RBF capability"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // block commits from RL2 -- this will block until the start of the next iteration - // in this loop. - naka_skip_commit_op.0.lock().unwrap().replace(true); - // ensure RL1 performs an RBF after unblock block broadcast - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); - // unblock block mining - let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.lock().unwrap().replace(false); + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); - // wait for a block to be processed (or timeout!) - if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { - info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); - return (sort_tip, false); - } + info!("Pausing commits from RL1"); + skip_commit_op_rl1.set(true); - info!("Nakamoto block processed, waiting for commit from RL1"); + let burn_height_before = get_burn_height(); + info!("Mine RL1 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - // wait for a commit from RL1 - wait_for(60, || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > rl1_commits_before) - }) - .unwrap(); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set - thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); - (sort_tip, true) - }; + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); - let mut won_by_miner_2_but_no_tenure = false; - let mut won_by_miner_1_after_tenureless_miner_2 = false; - let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); - // miner 2 is expected to be valid iff: - // (a) its the first nakamoto tenure - // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) - let mut expects_miner_2_to_be_valid = true; - // due to the random nature of mining sortitions, the way this test is structured - // is that keeps track of two scenarios that we want to cover, and once enough sortitions - // have been produced to cover those scenarios, it stops and checks the results at the end. 
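The rewritten `miner_forking` test above drives sortition winners deterministically by repeating one idiom: unpause one miner's block commits, wait for its commit counter to tick, then re-pause it. A hypothetical helper capturing that idiom is sketched below; the helper name and the `TestFlag` parameter type are illustrative assumptions, while `wait_for`, the `.set(bool)` calls, and the `Ordering::SeqCst` counter loads are used exactly as in the test:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Hypothetical helper (not part of this change): let one miner submit
// exactly one block commit, then pause it again so the test controls
// which miner wins the next sortition.
fn submit_single_commit(
    skip_commit_op: &TestFlag, // assumed: the flag wrapper whose `.set(bool)` is used above
    commits_submitted: &Arc<AtomicU64>,
    miner_label: &str,
) {
    let commits_before = commits_submitted.load(Ordering::SeqCst);
    skip_commit_op.set(false); // unpause commits
    wait_for(30, || {
        Ok(commits_submitted.load(Ordering::SeqCst) > commits_before)
    })
    .unwrap_or_else(|_| panic!("Timed out waiting for {miner_label} to submit a commit op"));
    skip_commit_op.set(true); // re-pause before the next burn block
}
```

Each "Unpausing commits / Waiting for commits / Pausing commits" triple in the test body would then collapse to a single call such as `submit_single_commit(&skip_commit_op_rl2, &commits_submitted_rl2, "miner 2")`.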
- while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { - let nmb_sortitions_seen = sortitions_seen.len(); - assert!(max_sortitions >= nmb_sortitions_seen, "Produced {nmb_sortitions_seen} sortitions, but didn't cover the test scenarios, aborting"); - let (sortition_data, had_tenure) = run_sortition(); - sortitions_seen.push((sortition_data.clone(), had_tenure)); - - let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) - .into_iter() - .map(|header| { - info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); - (header.consensus_hash.clone(), header) - }) - .collect(); + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); - if had_tenure { - let header_info = nakamoto_headers - .get(&sortition_data.consensus_hash) - .unwrap(); - let header = header_info - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .clone(); - let mined_by_miner_1 = miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap(); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); - info!("Block check"; - "height" => header.chain_length, - "consensus_hash" => %header.consensus_hash, - "block_hash" => %header.block_hash(), - "stacks_block_id" => %header.block_id(), - "mined_by_miner_1?" => mined_by_miner_1, - "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); - if !mined_by_miner_1 { - assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); - } else if won_by_miner_2_but_no_tenure { - // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't - // mine during because they tried to fork. - won_by_miner_1_after_tenureless_miner_2 = true; - } + // Wait for the block to be broadcasted and processed + wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) + .expect("Timed out waiting for a block to be processed"); - // even if it was mined by miner 2, their next block commit should be invalid! - expects_miner_2_to_be_valid = false; - } else { - info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); - assert!(nakamoto_headers - .get(&sortition_data.consensus_hash) - .is_none()); - assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); - won_by_miner_2_but_no_tenure = true; - expects_miner_2_to_be_valid = true; - } - } + info!("Ensure that RL1 performs an RBF after unblocking block broadcast"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to RBF its old commit op"); + + info!("Mine RL1 Tenure"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // fetch the current sortition info + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL1 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_1, + "RL1 did not win the sortition" + ); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) + }) + .collect(); + + let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("------------------------- Verify Peer Data -------------------------"); let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; @@ -2034,10 +2121,8 @@ fn end_of_tenure() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); @@ -2185,10 +2270,8 @@ fn retry_on_rejection() { let send_fee = 180; let short_timeout = Duration::from_secs(30); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2198,7 +2281,7 @@ fn retry_on_rejection() { let sortdb = burnchain.open_sortition_db(true).unwrap(); wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(tip.sortition) }) .expect("Timed out waiting for sortition"); @@ -2324,10 +2407,8 @@ fn signers_broadcast_signed_blocks() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = 
SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2345,8 +2426,8 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + "blocks_mined: {blocks_mined},{blocks_before}, stacks_tip_height: {},{}", + info.stacks_tip_height, info_before.stacks_tip_height ); Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) }) @@ -2388,11 +2469,7 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", - blocks_mined, - blocks_before, - signer_pushed, - signer_pushed_before, + "blocks_mined: {blocks_mined},{blocks_before}, signers_pushed: {signer_pushed},{signer_pushed_before}, stacks_tip_height: {},{}", info.stacks_tip_height, info_before.stacks_tip_height ); @@ -2432,7 +2509,7 @@ fn empty_sortition() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2499,10 +2576,7 @@ fn empty_sortition() { signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); let blocks_after = signer_test .running_nodes @@ -2593,8 +2667,13 @@ fn empty_sortition() { #[test] #[ignore] -/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. -fn mock_sign_epoch_25() { +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been approved. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_approval() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2610,282 +2689,397 @@ fn mock_sign_epoch_25() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - |_| {}, - |node_config| { - node_config.miner.pre_nakamoto_mock_signing = true; - let epochs = node_config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, None, None, ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let epochs = signer_test - .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + signer_test.boot_to_epoch_3(); - signer_test.boot_to_epoch_25_reward_cycle(); + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); - info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + let info = get_chain_info(&signer_test.running_nodes.conf); + let burn_height_before = info.burn_block_height; + let stacks_height_before = info.stacks_tip_height; - // Mine until epoch 3.0 and ensure that no more mock signatures are received - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); - assert_eq!(signer_slot_ids.len(), num_signers); + info!("Forcing miner to ignore signatures for next block"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); - let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + info!("Pausing block commits to trigger an empty sortition."); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); - // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition - let main_poll_time = Instant::now(); - // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
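The two new empty-sortition tests set up the same scenario and differ only in which stall flag they flip before the empty sortition arrives. A minimal sketch of that difference, using the flags exactly as they appear in the two test bodies:

```rust
// Both tests pause block commits, so a later burn block arrives with no
// winning commit (an empty sortition), pushing the miner to tenure-extend.
signer_test
    .running_nodes
    .nakamoto_test_skip_commit_op
    .0
    .lock()
    .unwrap()
    .replace(true);

// empty_sortition_before_approval: tenure A's block is proposed, but the
// miner is forced to ignore signer signatures, so the block is still
// unapproved when the empty sortition arrives.
TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);

// empty_sortition_before_proposal: the miner is stalled outright, so no
// block for tenure A is proposed until after the empty sortition.
TEST_MINE_STALL.lock().unwrap().replace(true);
```

In both cases the signers are expected to accept the resulting tenure extend and keep signing blocks built off the old sortition.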
- while signer_test + info!("------------------------- Test Mine Tenure A -------------------------"); + let proposed_before = signer_test .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_boundary - { - let mut mock_block_mesage = None; - let mock_poll_time = Instant::now(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); - while mock_block_mesage.is_none() { - std::thread::sleep(Duration::from_millis(100)); - let chunks = test_observer::get_stackerdb_chunks(); - for chunk in chunks - .into_iter() - .filter_map(|chunk| { - if chunk.contract_id != miners_stackerdb_contract { - return None; - } - Some(chunk.modified_slots) - }) - .flatten() - { - if chunk.data.is_empty() { - continue; - } - let SignerMessage::MockBlock(mock_block) = - SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage") - else { - continue; - }; - if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height - { - mock_block - .mock_signatures - .iter() - .for_each(|mock_signature| { - assert!(signer_public_keys.iter().any(|signer| { - mock_signature - .verify( - &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) - .unwrap(), - ) - .expect("Failed to verify mock signature") - })); - }); - mock_block_mesage = Some(mock_block); - break; - } + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + // Mine a regular tenure and wait for a block proposal + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let proposed_count = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + Ok(proposed_count > proposed_before) + }, + ) + .expect("Failed to mine tenure A and propose a block"); + + info!("------------------------- Test Mine Empty Tenure B -------------------------"); + + // Trigger an empty tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height == burn_height_before + 2) + }, + ) + .expect("Failed to mine empty tenure"); + + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + info!("Stop ignoring signers and wait for the tip to advance"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; } - assert!( - mock_poll_time.elapsed() <= Duration::from_secs(15), - "Failed to find mock miner message within timeout" - ); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut 
&tx_bytes[..]).unwrap(); + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => {} + } + }; } - assert!( - main_poll_time.elapsed() <= Duration::from_secs(45), - "Timed out waiting to advance epoch 3.0 boundary" - ); - } + Ok(false) + }) + .expect("Timed out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); } #[test] #[ignore] -fn multiple_miners_mock_sign_epoch_25() { +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been proposed. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. +/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_proposal() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); let num_signers = 5; let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - - let btc_miner_1_seed = vec![1, 1, 1, 1]; - let btc_miner_2_seed = vec![2, 2, 2, 2]; - let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); - let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - - let node_1_rpc = gen_random_port(); - let node_1_p2p = gen_random_port(); - let node_2_rpc = gen_random_port(); - let node_2_p2p = gen_random_port(); - - let localhost = "127.0.0.1"; - let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); - let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); - let mut node_2_listeners = Vec::new(); - - // partition the signer set so that ~half are listening and using node 1 for RPC and events, - // and the rest are using node 2 - + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - |signer_config| { - let node_host = if signer_config.endpoint.port() % 2 == 0 { - &node_1_rpc_bind - } else { - &node_2_rpc_bind - }; - signer_config.node_host = node_host.to_string(); - }, + vec![(sender_addr, send_amt + send_fee)], |config| { - config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); - config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); - config.node.data_url 
= format!("http://{localhost}:{node_1_rpc}"); - config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - - config.node.seed = btc_miner_1_seed.clone(); - config.node.local_peer_seed = btc_miner_1_seed.clone(); - config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); - config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); - config.miner.pre_nakamoto_mock_signing = true; - let epochs = config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } - config.events_observers.retain(|listener| { - let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { - warn!( - "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", - listener.endpoint - ); - return true; - }; - if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { - return true; - } - node_2_listeners.push(listener.clone()); - false - }) + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + |_| {}, + None, None, ); - let conf = signer_test.running_nodes.conf.clone(); - let mut conf_node_2 = conf.clone(); - let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.seed = btc_miner_2_seed.clone(); - conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); - conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); - conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); - conf_node_2.node.miner = true; - conf_node_2.events_observers.clear(); - conf_node_2.events_observers.extend(node_2_listeners); - assert!(!conf_node_2.events_observers.is_empty()); - - let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); - let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + signer_test.boot_to_epoch_3(); - conf_node_2.node.set_bootstrap_nodes( - format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), - conf.burnchain.chain_id, - conf.burnchain.peer_version, - ); + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); - let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); - let _run_loop_2_thread = thread::Builder::new() - .name("run_loop_2".into()) - .spawn(move || run_loop_2.start(None, 0)) - .unwrap(); + let info = get_chain_info(&signer_test.running_nodes.conf); + let stacks_height_before = info.stacks_tip_height; - let epochs = signer_test + info!("Pause block commits to ensure we get an empty sortition"); + signer_test .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the 
boundary as epoch 2.5 miner gets torn down at the boundary + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); - signer_test.boot_to_epoch_25_reward_cycle(); + info!("Pause miner so it doesn't propose a block before the next tenure arrives"); + TEST_MINE_STALL.lock().unwrap().replace(true); - info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; - // Mine until epoch 3.0 and ensure that no more mock signatures are received - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); - assert_eq!(signer_slot_ids.len(), num_signers); + info!("------------------------- Test Mine Tenure A and B -------------------------"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(2); - let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.burn_block_height == burn_height_before + 2) + }) + .expect("Failed to advance chain tip"); - // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. - while signer_test + // Sleep a bit more to ensure the signers see both burn blocks + sleep_ms(5_000); + + info!("Unpause miner"); + TEST_MINE_STALL.lock().unwrap().replace(false); + + info!("Unpause block commits"); + signer_test .running_nodes - .btc_regtest_controller - .get_headers_height() - < epoch_3_boundary - { - let mut mock_block_mesage = None; - let mock_poll_time = Instant::now(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - let current_burn_block_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => {} + } + }; + } + Ok(false) + }) + .expect("Timed out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, 
&transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. +fn mock_sign_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + node_config.miner.pre_nakamoto_mock_signing = true; + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + }, + None, + None, + ); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); + assert_eq!(signer_slot_ids.len(), num_signers); + + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition + let main_poll_time = Instant::now(); + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
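Both empty-sortition tests above finish with an identical loop that scans the last observed block for a `TenureChange` transaction with cause `Extended`. A hypothetical shared helper for that duplicated loop is sketched below; the name `wait_for_tenure_extend` and the `Result<(), String>` signature (mirroring how `wait_for` is used here) are assumptions, while every call inside the body appears verbatim in the test code:

```rust
// Hypothetical shared helper for the duplicated tenure-extend wait loop.
fn wait_for_tenure_extend(timeout_secs: u64) -> Result<(), String> {
    wait_for(timeout_secs, || {
        let blocks = test_observer::get_blocks();
        let Some(last_block) = blocks.last() else {
            return Ok(false);
        };
        for tx in last_block["transactions"].as_array().unwrap() {
            let raw_tx = tx["raw_tx"].as_str().unwrap();
            if raw_tx == "0x00" {
                continue; // skip entries with no raw transaction body
            }
            let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
            let parsed =
                StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
            if let TransactionPayload::TenureChange(payload) = &parsed.payload {
                if matches!(payload.cause, TenureChangeCause::Extended) {
                    return Ok(true); // found the tenure extend
                }
            }
        }
        Ok(false)
    })
}
```

Either test could then reduce its inline loop to `wait_for_tenure_extend(60).expect("Timed out waiting for tenure extend")`. The relocated mock-signing code resumes below.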
+ while signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); while mock_block_mesage.is_none() { std::thread::sleep(Duration::from_millis(100)); @@ -2933,268 +3127,470 @@ fn multiple_miners_mock_sign_epoch_25() { "Failed to find mock miner message within timeout" ); } + assert!( + main_poll_time.elapsed() <= Duration::from_secs(45), + "Timed out waiting to advance epoch 3.0 boundary" + ); } } #[test] #[ignore] -/// This test asserts that signer set rollover works as expected. -/// Specifically, if a new set of signers are registered for an upcoming reward cycle, -/// old signers shut down operation and the new signers take over with the commencement of -/// the next reward cycle. -fn signer_set_rollover() { - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); +fn multiple_miners_mock_sign_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let new_num_signers = 4; - - let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); - let new_signer_public_keys: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) - .collect(); - let new_signer_addresses: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| tests::to_addr(sk)) - .collect(); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - - let mut initial_balances = new_signer_addresses - .iter() - .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) - .collect::>(); - - initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); - let run_stamp = rand::random(); + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); - let rpc_port = 51024; - let rpc_bind = format!("127.0.0.1:{}", rpc_port); + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); - // Setup the new signers that will take over - let new_signer_configs = build_signer_config_tomls( - &new_signer_private_keys, - &rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
- &Network::Testnet, - "12345", - run_stamp, - 3000 + num_signers, - Some(100_000), - None, - Some(9000 + num_signers), - None, - ); + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); - let new_spawned_signers: Vec<_> = (0..new_num_signers) - .into_iter() - .map(|i| { - info!("spawning signer"); - let signer_config = - SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); - SpawnedSigner::new(signer_config) - }) - .collect(); + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 - // Boot with some initial signer set let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - initial_balances, - |_| {}, - |naka_conf| { - for toml in new_signer_configs.clone() { - let signer_config = SignerConfig::load_from_str(&toml).unwrap(); - info!( - "---- Adding signer endpoint to naka conf ({}) ----", - signer_config.endpoint - ); - - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("{}", signer_config.endpoint), - events_keys: vec![ - EventKeyType::StackerDBChunks, - EventKeyType::BlockProposal, - EventKeyType::BurnchainBlocks, - ], - timeout_ms: 1000, - }); - } - naka_conf.node.rpc_bind = rpc_bind.clone(); + vec![(sender_addr, send_amt + send_fee)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); }, - None, - None, - ); - assert_eq!( - new_spawned_signers[0].config.node_host, - signer_test.running_nodes.conf.node.rpc_bind - ); - // Only stack for one cycle so that the signer set changes - signer_test.num_stacking_cycles = 1_u64; - - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(20); - - // Verify that naka_conf has our new signer's event observers - for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(&toml).unwrap(); - let endpoint = format!("{}", signer_config.endpoint); - assert!(signer_test - .running_nodes - .conf - .events_observers - .iter() - .any(|observer| observer.endpoint == endpoint)); - } + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); - // Advance to the first reward cycle, stacking to the old signers beforehand - - info!("---- Booting to epoch 3 -----"); - signer_test.boot_to_epoch_3(); + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.miner.pre_nakamoto_mock_signing = true; + let epochs = config.burnchain.epochs.as_mut().unwrap(); + for epoch in epochs.iter_mut() { + if epoch.epoch_id == StacksEpochId::Epoch25 { + epoch.end_height = 251; + } + if epoch.epoch_id == StacksEpochId::Epoch30 { + epoch.start_height = 251; + } + } + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener 
binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + let localhost = "127.0.0.1"; + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); - // verify that the first reward cycle has the old signers in the reward set - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_test_public_keys: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) - .collect(); + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - info!("---- Verifying that the current signers are the old signers ----"); - let current_signers = signer_test.get_reward_set_signers(reward_cycle); - assert_eq!(current_signers.len(), num_signers as usize); - // Verify that the current signers are the same as the old signers - for signer in current_signers.iter() { - assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); - } + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); - info!("---- Mining a block to trigger the signer set -----"); - // submit a tx so that the miner will mine an extra block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, ); - submit_tx(&http_origin, &transfer_tx); - signer_test.mine_nakamoto_block(short_timeout); - let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); - let block_sighash = mined_block.signer_signature_hash; - let signer_signatures = mined_block.signer_signature; - - // verify the mined_block signatures against the OLD signer set - for signature in signer_signatures.iter() { - let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) - .expect("FATAL: Failed to recover pubkey from block sighash"); - assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); - assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); - } - - // advance to the next reward cycle, stacking to the new signers beforehand - let reward_cycle = signer_test.get_current_reward_cycle(); - info!("---- Stacking new signers -----"); + let mut run_loop_2 = 
boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let _run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); - let burn_block_height = signer_test + let epochs = signer_test .running_nodes - .btc_regtest_controller - .get_headers_height(); - let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); - for stacker_sk in new_signer_private_keys.iter() { - let pox_addr = PoxAddress::from_legacy( - AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, - ); - let pox_addr_tuple: clarity::vm::Value = - pox_addr.clone().as_clarity_tuple().unwrap().into(); - let signature = make_pox_4_signer_key_signature( - &pox_addr, - &stacker_sk, - reward_cycle.into(), - &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, - 1_u128, - u128::MAX, - 1, - ) - .unwrap() - .to_rsv(); + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary - let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); - let stacking_tx = tests::make_contract_call( - &stacker_sk, - 0, - 1000, - signer_test.running_nodes.conf.burnchain.chain_id, - &StacksAddress::burn_address(false), - "pox-4", - "stack-stx", - &[ - clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), - pox_addr_tuple.clone(), - clarity::vm::Value::UInt(burn_block_height as u128), - clarity::vm::Value::UInt(1), - clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) - .unwrap(), - clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), - clarity::vm::Value::UInt(u128::MAX), - clarity::vm::Value::UInt(1), - ], - ); - submit_tx(&http_origin, &stacking_tx); - } + signer_test.boot_to_epoch_25_reward_cycle(); - wait_for(60, || { - Ok(accounts_to_check - .iter() - .all(|acct| get_account(&http_origin, acct).nonce >= 1)) - }) - .expect("Timed out waiting for stacking txs to be mined"); + info!("------------------------- Reached Epoch 2.5 Reward Cycle-------------------------"); - signer_test.mine_nakamoto_block(short_timeout); + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); + assert_eq!(signer_slot_ids.len(), num_signers); - let next_reward_cycle = reward_cycle.saturating_add(1); + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); - let next_cycle_height = signer_test + // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
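The relocated polling loop that follows deserializes each StackerDB chunk from the miners' contract, looks for a `MockBlock` at the current burn height, and checks its signatures. Condensed, the per-signature check amounts to the sketch below, where `mock_block` and `signer_public_keys` are the values in scope inside that loop:

```rust
// Every mock signature must verify against at least one reward-set signer key.
for mock_signature in &mock_block.mock_signatures {
    let verified = signer_public_keys.iter().any(|signer| {
        let pk = StacksPublicKey::from_slice(signer.to_bytes().as_slice()).unwrap();
        mock_signature
            .verify(&pk)
            .expect("Failed to verify mock signature")
    });
    assert!(verified, "Mock signature matched no reward-set signer key");
}
```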
+ while signer_test .running_nodes .btc_regtest_controller - .get_burnchain() - .nakamoto_first_block_of_cycle(next_reward_cycle) - .saturating_add(1); - - info!("---- Mining to next reward set calculation -----"); - signer_test.run_until_burnchain_height_nakamoto( - Duration::from_secs(60), - next_cycle_height.saturating_sub(3), - new_num_signers, - ); - - // Verify that the new reward set is the new signers - let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); - for signer in reward_set.iter() { - assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); - assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + .get_headers_height() + < epoch_3_boundary + { + let mut mock_block_mesage = None; + let mock_poll_time = Instant::now(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + let current_burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + debug!("Waiting for mock miner message for burn block height {current_burn_block_height}"); + while mock_block_mesage.is_none() { + std::thread::sleep(Duration::from_millis(100)); + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks + .into_iter() + .filter_map(|chunk| { + if chunk.contract_id != miners_stackerdb_contract { + return None; + } + Some(chunk.modified_slots) + }) + .flatten() + { + if chunk.data.is_empty() { + continue; + } + let SignerMessage::MockBlock(mock_block) = + SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage") + else { + continue; + }; + if mock_block.mock_proposal.peer_info.burn_block_height == current_burn_block_height + { + mock_block + .mock_signatures + .iter() + .for_each(|mock_signature| { + assert!(signer_public_keys.iter().any(|signer| { + mock_signature + .verify( + &StacksPublicKey::from_slice(signer.to_bytes().as_slice()) + .unwrap(), + ) + .expect("Failed to verify mock signature") + })); + }); + mock_block_mesage = Some(mock_block); + break; + } + } + assert!( + mock_poll_time.elapsed() <= Duration::from_secs(15), + "Failed to find mock miner message within timeout" + ); + } } +} - info!( - "---- Mining to the next reward cycle (block {}) -----", - next_cycle_height - ); - signer_test.run_until_burnchain_height_nakamoto( +#[test] +#[ignore] +/// This test asserts that signer set rollover works as expected. +/// Specifically, if a new set of signers are registered for an upcoming reward cycle, +/// old signers shut down operation and the new signers take over with the commencement of +/// the next reward cycle. 
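Schematically, the rollover property asserted by the relocated function below reduces to two set-membership checks over compressed signer keys, one per reward cycle. A sketch using the names from the test body:

```rust
// Cycle N: the reward set must contain only the original signer keys.
for signer in signer_test.get_reward_set_signers(reward_cycle).iter() {
    assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec()));
    assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec()));
}

// Cycle N + 1, after the new signers stack: the memberships invert.
for signer in signer_test.get_reward_set_signers(next_reward_cycle).iter() {
    assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec()));
    assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec()));
}
```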
+fn signer_set_rollover() { + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let new_num_signers = 4; + + let new_signer_private_keys: Vec<_> = (0..new_num_signers) + .map(|_| StacksPrivateKey::new()) + .collect(); + let new_signer_public_keys: Vec<_> = new_signer_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let mut initial_balances = new_signer_addresses + .iter() + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) + .collect::>(); + + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); + + let run_stamp = rand::random(); + + let rpc_port = 51024; + let rpc_bind = format!("127.0.0.1:{rpc_port}"); + + // Setup the new signers that will take over + let new_signer_configs = build_signer_config_tomls( + &new_signer_private_keys, + &rpc_bind, + Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. + &Network::Testnet, + "12345", + run_stamp, + 3000 + num_signers, + Some(100_000), + None, + Some(9000 + num_signers), + None, + ); + + let new_spawned_signers: Vec<_> = new_signer_configs + .iter() + .map(|conf| { + info!("spawning signer"); + let signer_config = SignerConfig::load_from_str(conf).unwrap(); + SpawnedSigner::new(signer_config) + }) + .collect(); + + // Boot with some initial signer set + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + initial_balances, + |_| {}, + |naka_conf| { + for toml in new_signer_configs.clone() { + let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + info!( + "---- Adding signer endpoint to naka conf ({}) ----", + signer_config.endpoint + ); + + naka_conf.events_observers.insert(EventObserverConfig { + endpoint: format!("{}", signer_config.endpoint), + events_keys: vec![ + EventKeyType::StackerDBChunks, + EventKeyType::BlockProposal, + EventKeyType::BurnchainBlocks, + ], + timeout_ms: 1000, + }); + } + naka_conf.node.rpc_bind = rpc_bind.clone(); + }, + None, + None, + ); + assert_eq!( + new_spawned_signers[0].config.node_host, + signer_test.running_nodes.conf.node.rpc_bind + ); + // Only stack for one cycle so that the signer set changes + signer_test.num_stacking_cycles = 1_u64; + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = Duration::from_secs(20); + + // Verify that naka_conf has our new signer's event observers + for toml in &new_signer_configs { + let signer_config = SignerConfig::load_from_str(toml).unwrap(); + let endpoint = format!("{}", signer_config.endpoint); + assert!(signer_test + .running_nodes + .conf + .events_observers + .iter() + .any(|observer| observer.endpoint == endpoint)); + } + + // Advance to the first reward cycle, stacking to the old signers beforehand + + info!("---- Booting to epoch 3 -----"); + signer_test.boot_to_epoch_3(); + + // verify that the first reward cycle has the old signers in the reward set + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_test_public_keys: Vec<_> = 
signer_test + .signer_stacks_private_keys + .iter() + .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) + .collect(); + + info!("---- Verifying that the current signers are the old signers ----"); + let current_signers = signer_test.get_reward_set_signers(reward_cycle); + assert_eq!(current_signers.len(), num_signers); + // Verify that the current signers are the same as the old signers + for signer in current_signers.iter() { + assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(!new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining a block to trigger the signer set -----"); + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + signer_test.mine_nakamoto_block(short_timeout); + let mined_block = test_observer::get_mined_nakamoto_blocks().pop().unwrap(); + let block_sighash = mined_block.signer_signature_hash; + let signer_signatures = mined_block.signer_signature; + + // verify the mined_block signatures against the OLD signer set + for signature in signer_signatures.iter() { + let pk = Secp256k1PublicKey::recover_to_pubkey(block_sighash.bits(), signature) + .expect("FATAL: Failed to recover pubkey from block sighash"); + assert!(signer_test_public_keys.contains(&pk.to_bytes_compressed())); + assert!(!new_signer_public_keys.contains(&pk.to_bytes_compressed())); + } + + // advance to the next reward cycle, stacking to the new signers beforehand + let reward_cycle = signer_test.get_current_reward_cycle(); + + info!("---- Stacking new signers -----"); + + let burn_block_height = signer_test + .running_nodes + .btc_regtest_controller + .get_headers_height(); + let accounts_to_check: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); + for stacker_sk in new_signer_private_keys.iter() { + let pox_addr = PoxAddress::from_legacy( + AddressHashMode::SerializeP2PKH, + tests::to_addr(stacker_sk).bytes, + ); + let pox_addr_tuple: clarity::vm::Value = + pox_addr.clone().as_clarity_tuple().unwrap().into(); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + stacker_sk, + reward_cycle.into(), + &Pox4SignatureTopic::StackStx, + CHAIN_ID_TESTNET, + 1_u128, + u128::MAX, + 1, + ) + .unwrap() + .to_rsv(); + + let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); + let stacking_tx = tests::make_contract_call( + stacker_sk, + 0, + 1000, + signer_test.running_nodes.conf.burnchain.chain_id, + &StacksAddress::burn_address(false), + "pox-4", + "stack-stx", + &[ + clarity::vm::Value::UInt(POX_4_DEFAULT_STACKER_STX_AMT), + pox_addr_tuple.clone(), + clarity::vm::Value::UInt(burn_block_height as u128), + clarity::vm::Value::UInt(1), + clarity::vm::Value::some(clarity::vm::Value::buff_from(signature).unwrap()) + .unwrap(), + clarity::vm::Value::buff_from(signer_pk.to_bytes_compressed()).unwrap(), + clarity::vm::Value::UInt(u128::MAX), + clarity::vm::Value::UInt(1), + ], + ); + submit_tx(&http_origin, &stacking_tx); + } + + wait_for(60, || { + Ok(accounts_to_check + .iter() + .all(|acct| get_account(&http_origin, acct).nonce >= 1)) + }) + .expect("Timed out waiting for stacking txs to be mined"); + + signer_test.mine_nakamoto_block(short_timeout); + + let next_reward_cycle = reward_cycle.saturating_add(1); + + let next_cycle_height = signer_test + 
.running_nodes + .btc_regtest_controller + .get_burnchain() + .nakamoto_first_block_of_cycle(next_reward_cycle) + .saturating_add(1); + + info!("---- Mining to next reward set calculation -----"); + signer_test.run_until_burnchain_height_nakamoto( + Duration::from_secs(60), + next_cycle_height.saturating_sub(3), + new_num_signers, + ); + + // Verify that the new reward set is the new signers + let reward_set = signer_test.get_reward_set_signers(next_reward_cycle); + for signer in reward_set.iter() { + assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); + assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); + } + + info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); + signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, new_num_signers, @@ -3204,7 +3600,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the new signers ----"); let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); - assert_eq!(current_signers.len(), new_num_signers as usize); + assert_eq!(current_signers.len(), new_num_signers); for signer in current_signers.iter() { assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); @@ -3262,13 +3658,12 @@ fn min_gap_between_blocks() { let send_amt = 100; let send_fee = 180; - let mut sender_nonce = 0; let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * interim_blocks)], + vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -3294,13 +3689,12 @@ fn min_gap_between_blocks() { // submit a tx so that the miner will mine an extra block let transfer_tx = make_stacks_transfer( &sender_sk, - sender_nonce, + interim_block_ix, // same as the sender nonce send_fee, signer_test.running_nodes.conf.burnchain.chain_id, &recipient, send_amt, ); - sender_nonce += 1; submit_tx(&http_origin, &transfer_tx); info!("Submitted transfer tx and waiting for block to be processed"); @@ -3312,7 +3706,7 @@ fn min_gap_between_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!("Mined interim block:{}", interim_block_ix); + info!("Mined interim block:{interim_block_ix}"); } wait_for(60, || { @@ -3426,7 +3820,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {:?}", &m); + info!("Message(accepted): {m:?}"); Some(m) } _ => { @@ -3503,7 +3897,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3542,7 +3936,7 @@ fn multiple_miners_with_nakamoto_blocks() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3565,7 +3959,7 @@ fn multiple_miners_with_nakamoto_blocks() { let node_1_sk = 
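A simplified model of the burnchain-height arithmetic used here, assuming a fixed reward-cycle length and ignoring the real chain's genesis offsets: the test first stops a few blocks short of the next cycle, inside the prepare phase, so the new reward set gets calculated, and only then crosses the boundary.

```rust
// Simplified reward-cycle height model (assumption: fixed cycle length).
fn first_block_of_cycle(first_burn_height: u64, cycle_len: u64, cycle: u64) -> u64 {
    first_burn_height + cycle * cycle_len
}

fn main() {
    let (first_burn_height, cycle_len) = (100, 20);
    let next_cycle = 7;
    // The test targets one block past the cycle boundary...
    let next_cycle_height = first_block_of_cycle(first_burn_height, cycle_len, next_cycle) + 1;
    // ...but first mines to a few blocks before it, so the reward set for the
    // next cycle is already calculated when the cycle begins.
    let reward_set_calc_height = next_cycle_height.saturating_sub(3);
    assert!(reward_set_calc_height < next_cycle_height);
    println!("calc at {reward_set_calc_height}, cycle starts by {next_cycle_height}");
}
```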
Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -3667,10 +4061,7 @@ fn multiple_miners_with_nakamoto_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -3681,7 +4072,7 @@ fn multiple_miners_with_nakamoto_blocks() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -3703,10 +4094,7 @@ fn multiple_miners_with_nakamoto_blocks() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -3724,10 +4112,7 @@ fn multiple_miners_with_nakamoto_blocks() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -3777,7 +4162,7 @@ fn partial_tenure_fork() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3812,7 +4197,7 @@ fn partial_tenure_fork() { panic!("Expected epochs to be set"); } }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3833,7 +4218,7 @@ fn partial_tenure_fork() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -4057,14 +4442,11 @@ fn partial_tenure_fork() { blocks = interim_block_ix; break; } else { - panic!("Failed to submit tx: {}", e); + panic!("Failed to submit tx: {e}"); } } } - info!( - "Attempted to mine interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); } if miner == 1 { @@ -4084,81 +4466,716 @@ fn partial_tenure_fork() { if miner == 1 { assert_eq!(mined_1, mined_before_1 + blocks + 1); + } else if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + blocks + 1); } else { - if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + blocks + 1); - } else { - // Miner 2 should have mined 0 blocks after the fork - assert_eq!(mined_2, mined_before_2); - } + // Miner 2 should have mined 0 
blocks after the fork
+            assert_eq!(mined_2, mined_before_2);
         }
     }
 
     info!(
-        "New chain info 1: {:?}",
-        get_chain_info(&signer_test.running_nodes.conf)
+        "New chain info 1: {:?}",
+        get_chain_info(&signer_test.running_nodes.conf)
+    );
+
+    info!("New chain info 2: {:?}", get_chain_info(&conf_node_2));
+
+    let peer_1_height = get_chain_info(&conf).stacks_tip_height;
+    let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height;
+    assert_eq!(peer_2_height, ignore_block - 1);
+    // The height may be higher than expected due to extra transactions waiting
+    // to be mined during the forking miner's tenure.
+    // Due to TooMuchChaining, we cannot guarantee that the miner will mine inter_blocks_per_tenure blocks.
+    // The height must be at least the number of blocks mined by miner 1 plus the number of blocks
+    // mined by miner 2 before the fork was initiated.
+    assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks);
+    assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures);
+
+    let sortdb = SortitionDB::open(
+        &conf_node_2.get_burn_db_file_path(),
+        false,
+        conf_node_2.get_burnchain().pox_constants,
+    )
+    .unwrap();
+
+    let (chainstate, _) = StacksChainState::open(
+        false,
+        conf_node_2.burnchain.chain_id,
+        &conf_node_2.get_chainstate_path_str(),
+        None,
+    )
+    .unwrap();
+    let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
+        .unwrap()
+        .unwrap();
+    assert_eq!(tip.stacks_block_height, ignore_block - 1);
+    rl2_coord_channels
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper_2.store(false, Ordering::SeqCst);
+    run_loop_2_thread.join().unwrap();
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// Test that signers that locally accept a block which is then globally rejected will accept a
+/// subsequent attempt by the miner, essentially reorging their prior locally accepted/signed
+/// block; i.e. the globally rejected block overrides their local view.
+///
+/// Test Setup:
+/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+///
+/// Test Execution:
+/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers.
+/// The miner then attempts to mine N+1', and all signers accept the block.
+///
+/// Test Assertion:
+/// Stacks tip advances to N+1'
+fn locally_accepted_blocks_overriden_by_global_rejection() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let nmb_txs = 3;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let short_timeout_secs = 20;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
+    );
+
+    let all_signers: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect();
+
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    info!("------------------------- Test Mine Nakamoto Block N -------------------------");
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    // submit a tx so that the miner will mine a stacks block
+    let mut sender_nonce = 0;
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} in to mine block N");
+    wait_for(short_timeout_secs, || {
+        Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before
+            && signer_test
+                .stacks_client
+                .get_peer_info()
+                .unwrap()
+                .stacks_tip_height
+                > info_before.stacks_tip_height)
+    })
+    .expect("Timed out waiting for stacks block N to be mined");
+    sender_nonce += 1;
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
+    signer_test
+        .wait_for_block_acceptance(
+            short_timeout_secs,
+            &block_n.signer_signature_hash,
+            &all_signers,
+        )
+        .expect("Timed out waiting for block acceptance of N");
+
+    info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
+    // Make half of the signers reject the block proposal by the miner to ensure it's marked globally rejected
+    let rejecting_signers: Vec<_> = all_signers
+        .iter()
+        .cloned()
+        .take(num_signers / 2 + num_signers % 2)
+        .collect();
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(rejecting_signers.clone());
+    test_observer::clear();
+    // Make a new stacks transaction to create a different block signature, but make sure to propose it
+    // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+    info!("Submitted tx {tx} to mine block N+1");
+
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
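The `num_signers / 2 + num_signers % 2` selection is a ceiling half. Under the (assumed) rule that global acceptance needs roughly 70% of signer weight, ceil(n/2) rejections make acceptance unreachable; a quick arithmetic check:

```rust
// Threshold arithmetic behind the rejecting-signer selection, assuming the
// ~70%-weight global acceptance rule these tests exercise.
fn min_acceptances(num_signers: usize) -> usize {
    // Smallest count covering 70% of the set (integer ceiling).
    (num_signers * 7).div_ceil(10)
}

fn main() {
    let num_signers = 5;
    // ceil(n/2) rejections, as selected above.
    let rejecting = num_signers / 2 + num_signers % 2;
    let best_case_acceptances = num_signers - rejecting;
    // With 5 signers: 3 reject, so at most 2 can accept, below the 4 needed.
    assert_eq!(rejecting, 3);
    assert!(best_case_acceptances < min_acceptances(num_signers));
}
```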
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
+    // We cannot guarantee that ALL signers will reject due to the testing directive, as we may hit
+    // the majority first. So only assert that up to the threshold number of signers rejected.
+    signer_test
+        .wait_for_block_rejections(short_timeout_secs, &rejecting_signers)
+        .expect("Timed out waiting for block rejection of N+1");
+
+    assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst));
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(info_before, info_after);
+    // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1 = nakamoto_blocks.last().unwrap();
+    assert_ne!(block_n_1, block_n);
+
+    info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------");
+    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N+1'");
+
+    wait_for(short_timeout_secs, || {
+        Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before
+            && signer_test
+                .stacks_client
+                .get_peer_info()
+                .unwrap()
+                .stacks_tip_height
+                > info_before.stacks_tip_height
+            && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1)
+    })
+    .expect("Timed out waiting for stacks block N+1' to be mined");
+    let blocks_after = mined_blocks.load(Ordering::SeqCst);
+    assert_eq!(blocks_after, blocks_before + 1);
+
+    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    assert_eq!(
+        info_after.stacks_tip_height,
+        info_before.stacks_tip_height + 1
+    );
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N+1'
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n_1_prime = nakamoto_blocks.last().unwrap();
+    assert_eq!(
+        info_after.stacks_tip.to_string(),
+        block_n_1_prime.block_hash
+    );
+    assert_ne!(block_n_1_prime, block_n_1);
+    // Verify that all signers accepted the new block proposal
+    signer_test
+        .wait_for_block_acceptance(
+            short_timeout_secs,
+            &block_n_1_prime.signer_signature_hash,
+            &all_signers,
+        )
+        .expect("Timed out waiting for block acceptance of N+1'");
+}
+
+#[test]
+#[ignore]
+/// Test that signers that locally reject a block which is then globally accepted will accept
+/// a subsequent block built on top of the accepted block
+///
+/// Test Setup:
+/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+///
+/// Test Execution:
+/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers.
+/// The miner then attempts to mine N+2, and all signers accept the block.
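These tests lean heavily on the `wait_for` helper. A plausible std-only shape for it, assuming the semantics the call sites suggest (poll the closure until it yields `Ok(true)`, an `Err`, or the timeout in seconds elapses):

```rust
// Assumed shape of the `wait_for` test helper; the real implementation may
// differ in poll interval and error type.
use std::thread::sleep;
use std::time::{Duration, Instant};

fn wait_for(
    timeout_secs: u64,
    mut check: impl FnMut() -> Result<bool, String>,
) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    while Instant::now() < deadline {
        if check()? {
            return Ok(());
        }
        sleep(Duration::from_millis(100));
    }
    Err("timed out".into())
}

fn main() {
    let start = Instant::now();
    // Poll until the condition becomes true, well before the 5s timeout.
    wait_for(5, || Ok(start.elapsed() > Duration::from_millis(300))).unwrap();
    println!("condition met after {:?}", start.elapsed());
}
```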
+/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn locally_rejected_blocks_overriden_by_global_acceptance() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let short_timeout = 30; + signer_test.boot_to_epoch_3(); + + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // submit a tx so that the miner will mine a stacks block N + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx {tx} in to mine block N"); + + wait_for(short_timeout, || { + Ok(signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for N to be mined and processed"); + + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was accepted globally so the stacks tip has advanced to N + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N"); + + info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); + // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted + let rejecting_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 3 / 10) + .collect(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(rejecting_signers.clone()); + test_observer::clear(); + + // submit a tx so that the miner will mine a stacks block N+1 + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + info!("Submitted tx 
{tx} in to mine block N+1"); + + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N+1 to be mined"); + + signer_test + .wait_for_block_rejections(short_timeout, &rejecting_signers) + .expect("Timed out waiting for block rejection of N+1"); + + // Assert the block was mined + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst)); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + assert_ne!(block_n_1, block_n); + + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_1.signer_signature_hash, + &all_signers[num_signers * 3 / 10 + 1..], + ) + .expect("Timed out waiting for block acceptance of N+1"); + + info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); + // Ensure that all signers accept the block proposal N+2 + let info_before = signer_test.stacks_client.get_peer_info().unwrap(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(Vec::new()); + + // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N+2"); + wait_for(30, || { + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && signer_test + .stacks_client + .get_peer_info() + .unwrap() + .stacks_tip_height + > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for stacks block N+2 to be mined"); + let blocks_after = mined_blocks.load(Ordering::SeqCst); + assert_eq!(blocks_after, blocks_before + 1); + + let info_after = signer_test.stacks_client.get_peer_info().unwrap(); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height, + ); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_2 = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); + assert_ne!(block_n_2, block_n_1); + + // Make sure that ALL signers accepted the block proposal + signer_test + .wait_for_block_acceptance( + short_timeout, + &block_n_2.signer_signature_hash, + &all_signers, + ) + .expect("Timed out waiting for block acceptance of N+2"); +} + +#[test] +#[ignore] +/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. 
+/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
+///
+/// Test Execution:
+/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers
+/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept.
+///
+/// Test Assertion:
+/// Stacks tip advances to N+1'
+fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let nmb_txs = 2;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
+        |config| {
+            // Just accept all reorg attempts
+            config.tenure_last_block_proposal_timeout = Duration::from_secs(0);
+        },
+        |config| {
+            config.miner.block_commit_delay = Duration::from_secs(0);
+        },
+        None,
+        None,
+    );
+    let all_signers = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect::<Vec<_>>();
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = 30;
+    signer_test.boot_to_epoch_3();
+    info!("------------------------- Starting Tenure A -------------------------");
+    info!("------------------------- Test Mine Nakamoto Block N -------------------------");
+    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+
+    // submit a tx so that the miner will mine a stacks block
+    let mut sender_nonce = 0;
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+    info!("Submitted tx {tx} in to mine block N");
+    wait_for(short_timeout, || {
+        let info_after = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info");
+        Ok(info_after.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(
+        info_before.stacks_tip_height + 1,
+        info_after.stacks_tip_height
+    );
+    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = nakamoto_blocks.last().unwrap();
+    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
+
+    info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
+    // Make >70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected
+    let ignoring_signers: Vec<_> = all_signers
+        .iter()
+        .cloned()
+        .take(num_signers * 7 / 10)
+        .collect();
+    let non_ignoring_signers: Vec<_> = all_signers
+        .iter()
+        .cloned()
+        .skip(num_signers * 7 /
10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(ignoring_signers.clone()); + // Clear the stackerdb chunks + test_observer::clear(); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + wait_for(short_timeout, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) + } + _ => None, + } + }) + .collect::>(); + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" ); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(Vec::new()); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); - - let peer_1_height = get_chain_info(&conf).stacks_tip_height; - let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; - assert_eq!(peer_2_height, ignore_block - 1); - // The height may be higher than expected due to extra transactions waiting - // to be mined during the forking miner's tenure. 
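The wait condition above counts distinct accepting signers plus the deliberately ignoring ones, and requires them to account for the whole set; deduplication matters because a signer can publish several stackerdb chunks for the same block. A reduced sketch:

```rust
// Sketch of the tally in the wait condition: distinct accepting keys plus
// the deliberately ignoring signers must cover every signer.
use std::collections::HashSet;

fn everyone_accounted_for(accepting_events: &[u32], ignoring: &HashSet<u32>, total: usize) -> bool {
    // Dedupe: a signer may publish several chunks for the same block.
    let accepted: HashSet<u32> = accepting_events.iter().copied().collect();
    accepted.len() + ignoring.len() == total
}

fn main() {
    let ignoring: HashSet<u32> = [0, 1, 2].into_iter().collect(); // 3 of 5 signers ignore
    // Signer 3 responded twice; signer 4 once.
    assert!(everyone_accounted_for(&[3, 4, 3], &ignoring, 5));
    assert!(!everyone_accounted_for(&[3], &ignoring, 5));
}
```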
- // We cannot guarantee due to TooMuchChaining that the miner will mine inter_blocks_per_tenure - // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 - // before the fork was initiated - assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height ); - let sortdb = SortitionDB::open( - &conf_node_2.get_burn_db_file_path(), - false, - conf_node_2.get_burnchain().pox_constants, - ) - .unwrap(); + // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_eq!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); + assert_ne!(block_n_1_prime, block_n); - let (chainstate, _) = StacksChainState::open( - false, - conf_node_2.burnchain.chain_id, - &conf_node_2.get_chainstate_path_str(), - None, - ) - .unwrap(); - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - assert_eq!(tip.stacks_block_height, ignore_block - 1); - rl2_coord_channels - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper_2.store(false, Ordering::SeqCst); - run_loop_2_thread.join().unwrap(); - signer_test.shutdown(); + // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure + signer_test + .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) + .expect("Timed out waiting for block acceptance of N+1'"); } #[test] #[ignore] -/// Test that signers that accept a block locally, but that was rejected globally will accept a subsequent attempt -/// by the miner essentially reorg their prior locally accepted/signed block, i.e. the globally rejected block overrides -/// their local view. +/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary +/// before the specified timeout has been exceeded. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. /// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. /// /// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by >30% of the signers. -/// The miner then attempts to mine N+1', and all signers accept the block. +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers reject as the timeout +/// has not been exceeded. /// /// Test Assertion: -/// Stacks tip advances to N+1' -fn locally_accepted_blocks_overriden_by_global_rejection() { +/// Stacks tip remains at N. 
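The only material difference between the succeeding and failing variants of this reorg test is `tenure_last_block_proposal_timeout` (0 vs 100,000 seconds). A simplified model of the gate, under the assumption that it reduces to the age of the locally accepted block:

```rust
// Simplified model of how `tenure_last_block_proposal_timeout` gates a reorg
// across a tenure boundary (assumed semantics, reduced to elapsed time).
use std::time::Duration;

fn may_reorg_locally_accepted(age_of_local_block: Duration, timeout: Duration) -> bool {
    age_of_local_block >= timeout
}

fn main() {
    let age = Duration::from_secs(30);
    // `..._succeeds` variant: a timeout of 0 accepts every reorg attempt.
    assert!(may_reorg_locally_accepted(age, Duration::from_secs(0)));
    // `..._fails` variant: a 100_000s timeout rejects any realistic attempt.
    assert!(!may_reorg_locally_accepted(age, Duration::from_secs(100_000)));
}
```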
+fn reorg_locally_accepted_blocks_across_tenures_fails() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -4174,27 +5191,35 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
     let sender_addr = tests::to_addr(&sender_sk);
     let send_amt = 100;
     let send_fee = 180;
-    let nmb_txs = 3;
+    let nmb_txs = 2;
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-    let short_timeout_secs = 20;
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
         num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
+        vec![(sender_addr, (send_amt + send_fee) * nmb_txs)],
+        |config| {
+            // Do not allow any reorg attempts, essentially
+            config.tenure_last_block_proposal_timeout = Duration::from_secs(100_000);
+        },
+        |_| {},
+        None,
+        None,
     );
-
-    let all_signers: Vec<_> = signer_test
+    let all_signers = signer_test
        .signer_stacks_private_keys
        .iter()
        .map(StacksPublicKey::from_private)
-        .collect();
-
+        .collect::<Vec<_>>();
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let short_timeout = 30;
     signer_test.boot_to_epoch_3();
-
+    info!("------------------------- Starting Tenure A -------------------------");
     info!("------------------------- Test Mine Nakamoto Block N -------------------------");
-    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
     let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
-    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+
+    // submit a tx so that the miner will mine a stacks block
     let mut sender_nonce = 0;
     let transfer_tx = make_stacks_transfer(
@@ -4206,19 +5231,22 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
         send_amt,
     );
     let tx = submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
     info!("Submitted tx {tx} in to mine block N");
-    wait_for(short_timeout_secs, || {
-        Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before
-            && signer_test
-                .stacks_client
-                .get_peer_info()
-                .unwrap()
-                .stacks_tip_height
-                > info_before.stacks_tip_height)
+    wait_for(short_timeout, || {
+        let info_after = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info");
+        Ok(info_after.stacks_tip_height > info_before.stacks_tip_height)
     })
-    .expect("Timed out waiting for stacks block N to be mined");
-    sender_nonce += 1;
-    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
+    .expect("Timed out waiting for block to be mined and processed");
+
+    // Ensure that the block was accepted globally so the stacks tip has advanced to N
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
     assert_eq!(
         info_before.stacks_tip_height + 1,
         info_after.stacks_tip_height
@@ -4226,26 +5254,32 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
     let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
     let block_n = nakamoto_blocks.last().unwrap();
     assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
-    signer_test
-        .wait_for_block_acceptance(
-            short_timeout_secs,
-            &block_n.signer_signature_hash,
-            &all_signers,
-        )
-        .expect("Timed out waiting for block acceptance of N");
 
     info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
-    // Make half of the signers reject the block proposal by the miner to ensure its marked globally rejected
-    let rejecting_signers: Vec<_> = all_signers
+    // Make >70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected
+    let ignoring_signers: Vec<_> = all_signers
         .iter()
         .cloned()
-        .take(num_signers / 2 + num_signers % 2)
+        .take(num_signers * 7 / 10)
         .collect();
-    TEST_REJECT_ALL_BLOCK_PROPOSAL
+    let non_ignoring_signers: Vec<_> = all_signers
+        .iter()
+        .cloned()
+        .skip(num_signers * 7 / 10)
+        .collect();
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS
         .lock()
         .unwrap()
-        .replace(rejecting_signers.clone());
+        .replace(ignoring_signers.clone());
+    // Clear the stackerdb chunks
     test_observer::clear();
+
+    let blocks_before = mined_blocks.load(Ordering::SeqCst);
+    let info_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+
+    // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1
     let transfer_tx = make_stacks_transfer(
         &sender_sk,
         sender_nonce,
@@ -4255,95 +5289,116 @@ fn locally_accepted_blocks_overriden_by_global_rejection() {
         send_amt,
     );
     let tx = submit_tx(&http_origin, &transfer_tx);
-    sender_nonce += 1;
-    info!("Submitted tx {tx} to mine block N+1");
-    let blocks_before = mined_blocks.load(Ordering::SeqCst);
-    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
-    // We cannot gaurantee that ALL signers will reject due to the testing directive as we may hit majority first..So ensure that we only assert that up to the threshold number rejected
-    signer_test
-        .wait_for_block_rejections(short_timeout_secs, &rejecting_signers)
-        .expect("Timed out waiting for block rejection of N+1");
+    info!("Submitted tx {tx} in to attempt to mine block N+1");
+    wait_for(short_timeout, || {
+        let accepted_signers = test_observer::get_stackerdb_chunks()
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .filter_map(|chunk| {
+                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                    .expect("Failed to deserialize SignerMessage");
+                match message {
+                    SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => {
+                        non_ignoring_signers.iter().find(|key| {
+                            key.verify(accepted.signer_signature_hash.bits(), &accepted.signature)
+                                .is_ok()
+                        })
+                    }
+                    _ => None,
+                }
+            })
+            .collect::<HashSet<_>>();
+        Ok(accepted_signers.len() + ignoring_signers.len() == num_signers)
+    })
+    .expect("FAIL: Timed out waiting for block proposal acceptance");
 
-    assert_eq!(blocks_before, mined_blocks.load(Ordering::SeqCst));
-    let info_after = signer_test.stacks_client.get_peer_info().unwrap();
-    assert_eq!(info_before, info_after);
-    // Ensure that the block was not accepted globally so the stacks tip has not advanced to N+1
+    let blocks_after = mined_blocks.load(Ordering::SeqCst);
+    let info_after = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+    assert_eq!(blocks_after, blocks_before);
+    assert_eq!(info_after, info_before);
+    // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1
     let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
     let block_n_1 = nakamoto_blocks.last().unwrap();
     assert_ne!(block_n_1, block_n);
+    assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash);
 
-    info!("------------------------- Test Mine Nakamoto Block N+1' -------------------------");
-    let info_before = signer_test.stacks_client.get_peer_info().unwrap();
-    TEST_REJECT_ALL_BLOCK_PROPOSAL
-        .lock()
-        .unwrap()
-        .replace(Vec::new());
+    info!("------------------------- Starting Tenure B -------------------------");
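The `TEST_IGNORE_ALL_BLOCK_PROPOSALS` / `TEST_REJECT_ALL_BLOCK_PROPOSAL` statics toggled here act as global fault-injection switches. The real statics wrap their payloads in a `Mutex`; the sketch below shows only the general pattern, with plain atomics standing in:

```rust
// Generic shape of a global test gate (assumption: the project's statics
// carry richer payloads, e.g. a list of affected signer keys).
use std::sync::atomic::{AtomicBool, Ordering};

static IGNORE_ALL_PROPOSALS: AtomicBool = AtomicBool::new(false);

fn signer_should_ignore_proposal() -> bool {
    // Hot paths consult the gate; tests flip it around the step under test.
    IGNORE_ALL_PROPOSALS.load(Ordering::SeqCst)
}

fn main() {
    IGNORE_ALL_PROPOSALS.store(true, Ordering::SeqCst); // enter fault window
    assert!(signer_should_ignore_proposal());
    IGNORE_ALL_PROPOSALS.store(false, Ordering::SeqCst); // lift it afterwards
    assert!(!signer_should_ignore_proposal());
}
```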
-------------------------"); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} to mine block N+1'"); + // Clear the test observer so any old rejections are not counted + test_observer::clear(); - wait_for(short_timeout_secs, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height - && test_observer::get_mined_nakamoto_blocks().last().unwrap() != block_n_1) - }) - .expect("Timed out waiting for stacks block N+1' to be mined"); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - assert_eq!(blocks_after, blocks_before + 1); + // Start a new tenure and ensure the we see the expected rejections + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let rejected_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signature, + signer_signature_hash, + .. + })) => non_ignoring_signers.iter().find(|key| { + key.verify(signer_signature_hash.bits(), &signature).is_ok() + }), + _ => None, + } + }) + .collect::>(); + Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) + }, + ) + .expect("FAIL: Timed out waiting for block proposal rejections"); - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!( - info_after.stacks_tip_height, - info_before.stacks_tip_height + 1 - ); - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after.stacks_tip, info_before.stacks_tip); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_1_prime = nakamoto_blocks.last().unwrap(); - assert_eq!( + assert_ne!(block_n_1, block_n_1_prime); + assert_ne!( info_after.stacks_tip.to_string(), block_n_1_prime.block_hash ); - assert_ne!(block_n_1_prime, block_n_1); - // Verify that all signers accepted the new block proposal - signer_test - .wait_for_block_acceptance( - short_timeout_secs, - &block_n_1_prime.signer_signature_hash, - &all_signers, - ) - .expect("Timed out waiting for block acceptance of N+1'"); } #[test] #[ignore] -/// Test that signers that reject a block locally, but that was accepted globally will accept -/// a subsequent block built on top of the accepted block +/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure +/// before it receives these signatures, the miner can recover in the following tenure. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. 
/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. /// /// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but rejected by <30% of the signers. -/// The miner then attempts to mine N+2, and all signers accept the block. +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. +/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The +/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner +/// proposes a new block N+2 which all signers accept. /// /// Test Assertion: /// Stacks tip advances to N+2 -fn locally_rejected_blocks_overriden_by_global_acceptance() { +fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4360,31 +5415,35 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let send_amt = 100; let send_fee = 180; let nmb_txs = 3; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); - - let all_signers: Vec<_> = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect(); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = 30; signer_test.boot_to_epoch_3(); + info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + + // wait until we get a sortition. 
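A later step of this test captures the miner's proposal from stackerdb and then counts the acceptances that commit to that exact block. A std-only sketch of that two-phase scan, with toy event types standing in for `SignerMessage`:

```rust
// Two-phase scan: find the tenure's proposal, then count acceptances that
// reference its hash. Toy stand-ins for the stackerdb message types.
enum Event {
    Proposal { tenure: u8, hash: u64 },
    Accepted { hash: u64 },
}

fn signatures_for_tenure(events: &[Event], tenure: u8) -> Option<usize> {
    // Phase 1: locate the proposal made in this tenure.
    let proposal_hash = events.iter().find_map(|e| match e {
        Event::Proposal { tenure: t, hash } if *t == tenure => Some(*hash),
        _ => None,
    })?;
    // Phase 2: count acceptances that commit to exactly that proposal.
    Some(
        events
            .iter()
            .filter(|e| matches!(e, Event::Accepted { hash } if *hash == proposal_hash))
            .count(),
    )
}

fn main() {
    let events = vec![
        Event::Proposal { tenure: 1, hash: 42 },
        Event::Accepted { hash: 42 },
        Event::Accepted { hash: 42 },
        Event::Accepted { hash: 7 }, // different block, ignored
    ];
    assert_eq!(signatures_for_tenure(&events, 1), Some(2));
}
```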
+ // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + wait_for(30, || { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - - // submit a tx so that the miner will mine a stacks block N + // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; let transfer_tx = make_stacks_transfer( &sender_sk, @@ -4395,19 +5454,21 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { send_amt, ); let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout, || { - Ok(signer_test + // a tenure has begun, so wait until we mine a block + wait_for(30, || { + let new_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") - .stacks_tip_height - > info_before.stacks_tip_height) + .stacks_tip_height; + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && new_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for N to be mined and processed"); + .expect("Timed out waiting for block to be mined and processed"); + sender_nonce += 1; let info_after = signer_test .stacks_client .get_peer_info() @@ -4417,35 +5478,175 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { info_after.stacks_tip_height ); - // Ensure that the block was accepted globally so the stacks tip has advanced to N let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); - // Make sure that ALL signers accepted the block proposal - signer_test - .wait_for_block_acceptance(short_timeout, &block_n.signer_signature_hash, &all_signers) - .expect("Timed out waiting for block acceptance of N"); - - info!("------------------------- Mine Nakamoto Block N+1 -------------------------"); - // Make less than 30% of the signers reject the block and ensure it is STILL marked globally accepted - let rejecting_signers: Vec<_> = all_signers - .iter() - .cloned() - .take(num_signers * 3 / 10) - .collect(); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(rejecting_signers.clone()); + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being + // broadcasted to the miner so it can end its tenure before block confirmation obtained + // Clear the stackerdb chunks + info!("Forcing miner to ignore block responses for block N+1"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + info!("Delaying signer block N+1 broadcasting to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + 
send_amt, + ); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let mut block = None; + wait_for(30, || { + block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + Some(accepted.signature) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(signatures.len() == num_signers) + }) + .expect("Test timed out while waiting for signers signatures for first block proposal"); + let block = block.unwrap(); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_same = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_same, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_same.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let commits_count = commits_submitted.load(Ordering::SeqCst); + Ok(commits_count > commits_before) + }, + ) + .unwrap(); + + info!( + "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" + ); + // Wait for the miner to propose a new invalid block N+1' + let mut rejected_block = None; + wait_for(30, || { + rejected_block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash != block.header.consensus_hash { + assert!( + proposal.block.header.chain_length == block.header.chain_length + ); + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + Ok(rejected_block.is_some()) + }) + .expect("Timed out waiting for block proposal of N+1' block proposal"); + + info!("Allowing miner to accept block responses again. 
"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + info!("Allowing signers to broadcast block N+1 to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); + + // Assert the N+1' block was rejected + let rejected_block = rejected_block.unwrap(); + wait_for(30, || { + let stackerdb_events = test_observer::get_stackerdb_chunks(); + let block_rejections = stackerdb_events + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { + if rejection.signer_signature_hash + == rejected_block.header.signer_signature_hash() + { + Some(rejection) + } else { + None + } + } + _ => None, + } + }) + .collect::>(); + Ok(block_rejections.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal rejections"); - // submit a tx so that the miner will mine a stacks block N+1 - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); + // Induce block N+2 to get mined let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -4454,426 +5655,482 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { &recipient, send_amt, ); + let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N+1"); + info!("Submitted tx {tx} in to attempt to mine block N+2"); + info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for stacks block N+1 to be mined"); + // N.B. 
have to use /v2/info because mined_blocks only increments if the miner's signing + // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) + let info = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); - signer_test - .wait_for_block_rejections(short_timeout, &rejecting_signers) - .expect("Timed out waiting for block rejection of N+1"); + Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height) + }) + .expect("Timed out waiting for blocks to be mined"); - // Assert the block was mined let info_after = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - assert_eq!(blocks_before + 1, mined_blocks.load(Ordering::SeqCst)); + assert_eq!( - info_before.stacks_tip_height + 1, + info_before.stacks_tip_height + 2, info_after.stacks_tip_height ); + let nmb_signatures = signer_test + .stacks_client + .get_tenure_tip(&info_after.stacks_tip_consensus_hash) + .expect("Failed to get tip") + .as_stacks_nakamoto() + .expect("Not a Nakamoto block") + .signer_signature + .len(); + assert!(nmb_signatures >= num_signers * 7 / 10); - // Ensure that the block was still accepted globally so the stacks tip has advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n_1.block_hash); - assert_ne!(block_n_1, block_n); - - signer_test - .wait_for_block_acceptance( - short_timeout, - &block_n_1.signer_signature_hash, - &all_signers[num_signers * 3 / 10 + 1..], - ) - .expect("Timed out waiting for block acceptance of N+1"); - - info!("------------------------- Test Mine Nakamoto Block N+2 -------------------------"); - // Ensure that all signers accept the block proposal N+2 - let info_before = signer_test.stacks_client.get_peer_info().unwrap(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - TEST_REJECT_ALL_BLOCK_PROPOSAL - .lock() - .unwrap() - .replace(Vec::new()); - - // submit a tx so that the miner will mine a stacks block N+2 and ensure ALL signers accept it - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to mine block N+2"); - wait_for(30, || { - Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before - && signer_test - .stacks_client - .get_peer_info() - .unwrap() - .stacks_tip_height - > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for stacks block N+2 to be mined"); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - assert_eq!(blocks_after, blocks_before + 1); - - let info_after = signer_test.stacks_client.get_peer_info().unwrap(); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height, - ); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n_2 = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash); - assert_ne!(block_n_2, block_n_1); - - // Make sure that ALL signers accepted the block proposal - signer_test - .wait_for_block_acceptance( - short_timeout, - &block_n_2.signer_signature_hash, - &all_signers, - ) - .expect("Timed out waiting for block acceptance of N+2"); + assert_ne!(block_n_2, block_n); } +/// Test a scenario where: +/// Two miners boot to 
Nakamoto.
+/// Miner 1 wins the first tenure and proposes a block N with a TenureChangePayload
+/// Signers accept and the stacks tip advances to N
+/// Miner 2 wins the second tenure B but its proposed blocks are rejected by the signers.
+/// Mine 2 empty burn blocks (simulating the fast-blocks scenario)
+/// Miner 2 proposes block N+1 with a TenureChangePayload
+/// Signers accept and the stacks tip advances to N+1
+/// Miner 2 proposes block N+2 with a TokenTransfer
+/// Signers accept and the stacks tip advances to N+2
+/// Mine an empty burn block
+/// Miner 2 proposes block N+3 with a TenureExtend
+/// Signers accept and the chain advances to N+3
+/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload
+/// Signers accept and the chain advances to N+4
+/// Asserts:
+/// - Block N+1 contains the TenureChangePayload
+/// - Block N+2 contains the TokenTransfer
+/// - Block N+3 contains the TenureExtend
+/// - Block N+4 contains the TenureChangePayload
+/// - The stacks tip advances to N+4
 #[test]
 #[ignore]
-/// Test that signers that have accept a locally signed block N+1 built in tenure A can sign a block proposed during a
-/// new tenure B built upon the last globally accepted block N, i.e. a reorg can occur at a tenure boundary.
-///
-/// Test Setup:
-/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
-/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing.
-///
-/// Test Execution:
-/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers
-/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers accept.
-///
-/// Test Assertion:
-/// Stacks tip advances to N+1'
-fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
+fn continue_after_fast_block_no_sortition() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
-    tracing_subscriber::registry()
-        .with(fmt::layer())
-        .with(EnvFilter::from_default_env())
-        .init();
+    let num_signers = 5;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let num_txs = 1;
+    let sender_nonce = 0;
+
+    let btc_miner_1_seed = vec![1, 1, 1, 1];
+    let btc_miner_2_seed = vec![2, 2, 2, 2];
+    let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key();
+    let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key();
+
+    let node_1_rpc = gen_random_port();
+    let node_1_p2p = gen_random_port();
+    let node_2_rpc = gen_random_port();
+    let node_2_p2p = gen_random_port();
+
+    let localhost = "127.0.0.1";
+    let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}");
+    let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}");
+    let mut node_2_listeners = Vec::new();
+
+    let max_nakamoto_tenures = 30;
+
+    info!("------------------------- Test Setup -------------------------");
+    // partition the signer set so that ~half are listening and using node 1 for RPC and events,
+    // and the rest are using node 2
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * num_txs)],
+        |signer_config| {
+            let node_host = if signer_config.endpoint.port() % 2 == 0 {
+                &node_1_rpc_bind
+            } else {
+                &node_2_rpc_bind
+            };
+            signer_config.node_host =
node_host.to_string(); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + config.miner.wait_on_interim_blocks = Duration::from_secs(5); + config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + + config.events_observers.retain(|listener| { + let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + return true; + }; + if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT { + return true; + } + node_2_listeners.push(listener.clone()); + false + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); + conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); + conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); + conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2])); + conf_node_2.node.miner = true; + conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, + naka_mined_blocks: blocks_mined2, + .. 
+    } = run_loop_2.counters();
+
+    let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+
+    // Some helper functions for verifying the blocks contain their expected transactions
+    let verify_last_block_contains_tenure_change_tx = |cause: TenureChangeCause| {
+        let blocks = test_observer::get_blocks();
+        let tenure_change_tx = &blocks.last().unwrap();
+        let transactions = tenure_change_tx["transactions"].as_array().unwrap();
+        let tx = transactions.first().expect("No transactions in block");
+        let raw_tx = tx["raw_tx"].as_str().unwrap();
+        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+        match &parsed.payload {
+            TransactionPayload::TenureChange(payload) => {
+                assert_eq!(payload.cause, cause);
+            }
+            _ => panic!("Expected tenure change transaction, got {parsed:?}"),
+        };
+    };
+
+    let verify_last_block_contains_transfer_tx = || {
+        let blocks = test_observer::get_blocks();
+        let tenure_change_tx = &blocks.last().unwrap();
+        let transactions = tenure_change_tx["transactions"].as_array().unwrap();
+        let tx = transactions.first().expect("No transactions in block");
+        let raw_tx = tx["raw_tx"].as_str().unwrap();
+        let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+        let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+        assert!(
+            matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)),
+            "Expected token transfer transaction, got {parsed:?}"
+        );
+    };
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
+
+    info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+    let run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    signer_test.boot_to_epoch_3();
+
+    wait_for(120, || {
+        let Some(node_1_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+    })
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+    let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf.miner.mining_key.unwrap(),
+    ));
+    let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf_node_2.miner.mining_key.unwrap(),
+    ));
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
-    info!("------------------------- Test Setup -------------------------");
-    let num_signers = 5;
-    let sender_sk = Secp256k1PrivateKey::new();
-    let sender_addr = tests::to_addr(&sender_sk);
-    let send_amt = 100;
-    let send_fee = 180;
-    let nmb_txs = 2;
-    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
-        num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
-    );
     let all_signers = signer_test
         .signer_stacks_private_keys
         .iter()
         .map(StacksPublicKey::from_private)
         .collect::<Vec<_>>();
-
let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = 30; - signer_test.boot_to_epoch_3(); - info!("------------------------- Starting Tenure A -------------------------"); - info!("------------------------- Test Mine Nakamoto Block N -------------------------"); - let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let info_before = signer_test + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; + let starting_peer_height = get_chain_info(&conf).stacks_tip_height; + let starting_burn_height = get_burn_height(); + let mut btc_blocks_mined = 0; + + info!("------------------------- Pause Miner 1's Block Commit -------------------------"); + // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(true); + + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); + .expect("Failed to get peer info") + .stacks_tip_height; - // submit a tx so that the miner will mine a stacks block - let mut sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); - sender_nonce += 1; - info!("Submitted tx {tx} in to mine block N"); - wait_for(short_timeout, || { - let info_after = signer_test + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); + + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) }) - .expect("Timed out waiting for block to be mined and processed"); + .unwrap(); - // Ensure that the block was accepted globally so the stacks tip has advanced to N - let info_after = signer_test + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); + + let stacks_height_before = signer_test .stacks_client .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n = nakamoto_blocks.last().unwrap(); - assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + .expect("Failed to get peer info") + .stacks_tip_height; - info!("------------------------- Attempt to Mine Nakamoto Block N+1 
-------------------------"); - // Make more than >70% of the signers ignore the block proposal to ensure it it is not globally accepted/rejected - let ignoring_signers: Vec<_> = all_signers - .iter() - .cloned() - .take(num_signers * 7 / 10) - .collect(); - TEST_IGNORE_ALL_BLOCK_PROPOSALS + // Make all signers ignore block proposals + let ignoring_signers = all_signers.to_vec(); + TEST_REJECT_ALL_BLOCK_PROPOSAL .lock() .unwrap() .replace(ignoring_signers.clone()); - // Clear the stackerdb chunks - test_observer::clear(); - // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - let tx = submit_tx(&http_origin, &transfer_tx); + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let rejections_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); - info!("Submitted tx {tx} in to attempt to mine block N+1"); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - wait_for(short_timeout, || { - let ignored_signers = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - ignoring_signers.iter().find(|key| { - key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) - .is_ok() - }) - } - _ => None, - } - }) - .collect::>(); - Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) }) - .expect("FAIL: Timed out waiting for block proposal acceptance"); + .unwrap(); - let blocks_after = mined_blocks.load(Ordering::SeqCst); - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!(blocks_after, blocks_before); - assert_eq!(info_after, info_before); - // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1 = nakamoto_blocks.last().unwrap(); - assert_ne!(block_n_1, block_n); - assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "rejections_before" => rejections_before, + ); - info!("------------------------- Starting Tenure B -------------------------"); - // Start a new tenure and ensure the miner can propose a new block N+1' that is accepted by all signers - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and( &mut 
signer_test.running_nodes.btc_regtest_controller, 60, - || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, + || Ok(get_burn_height() > burn_height_before), ) .unwrap(); + btc_blocks_mined += 1; - info!( - "------------------------- Mine Nakamoto Block N+1' in Tenure B -------------------------" - ); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - TEST_IGNORE_ALL_BLOCK_PROPOSALS - .lock() - .unwrap() - .replace(Vec::new()); - wait_for(short_timeout, || { - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) - }) - .expect("Timed out waiting for block to be mined and processed"); - - let info_after = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); - assert_eq!( - info_before.stacks_tip_height + 1, - info_after.stacks_tip_height - ); - - // Ensure that the block was accepted globally so the stacks tip has advanced to N+1' - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); - let block_n_1_prime = nakamoto_blocks.last().unwrap(); - assert_eq!( - info_after.stacks_tip.to_string(), - block_n_1_prime.block_hash - ); - assert_ne!(block_n_1_prime, block_n); - - // Make sure that ALL signers accepted the block proposal even though they signed a conflicting one in prior tenure - signer_test - .wait_for_block_acceptance(30, &block_n_1_prime.signer_signature_hash, &all_signers) - .expect("Timed out waiting for block acceptance of N+1'"); -} - -#[test] -#[ignore] -/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure -/// before it receives these signatures, the miner can recover in the following tenure. -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. -/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The -/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner -/// proposes a new block N+2 which all signers accept. 
-///
-/// Test Assertion:
-/// Stacks tip advances to N+2
-fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
+    // assure we have a successful sortition that miner B won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
-    tracing_subscriber::registry()
-        .with(fmt::layer())
-        .with(EnvFilter::from_default_env())
-        .init();
+    info!("----- Waiting for block rejections -----");
+    let min_rejections = num_signers * 4 / 10;
+    // Wait until we have some block rejections
+    wait_for(30, || {
+        std::thread::sleep(Duration::from_secs(1));
+        let chunks = test_observer::get_stackerdb_chunks();
+        let rejections: Vec<_> = chunks
+            .into_iter()
+            .flat_map(|chunk| chunk.modified_slots)
+            .filter(|chunk| {
+                let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+                else {
+                    return false;
+                };
+                matches!(
+                    message,
+                    SignerMessage::BlockResponse(BlockResponse::Rejected(_))
+                )
+            })
+            .collect();
+        Ok(rejections.len() >= min_rejections)
+    })
+    .expect("Timed out waiting for block rejections");
-    info!("------------------------- Test Setup -------------------------");
-    let num_signers = 5;
-    let sender_sk = Secp256k1PrivateKey::new();
-    let sender_addr = tests::to_addr(&sender_sk);
-    let send_amt = 100;
-    let send_fee = 180;
-    let nmb_txs = 3;
-    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
-        num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)],
-    );
-    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
-    signer_test.boot_to_epoch_3();
+    // Mine another couple burn blocks and ensure there is _no_ sortition
+    info!("------------------------- Mine Two Burn Blocks with No Sortitions -------------------------");
+    for _ in 0..2 {
+        let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+        let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
+        let burn_height_before = get_burn_height();
+        let commits_before_1 = rl1_commits.load(Ordering::SeqCst);
+        let commits_before_2 = rl2_commits.load(Ordering::SeqCst);
-    info!("------------------------- Starting Tenure A -------------------------");
-    info!("------------------------- Test Mine Nakamoto Block N -------------------------");
+        next_block_and(
+            &mut signer_test.running_nodes.btc_regtest_controller,
+            30,
+            || Ok(get_burn_height() > burn_height_before),
+        )
+        .unwrap();
+        btc_blocks_mined += 1;
-    // wait until we get a sortition.
-    // we might miss a block-commit at the start of epoch 3
-    let burnchain = signer_test.running_nodes.conf.get_burnchain();
-    let sortdb = burnchain.open_sortition_db(true).unwrap();
+        assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1);
+        assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2);
+        assert_eq!(
+            blocks_mined1.load(Ordering::SeqCst),
+            blocks_processed_before_1
+        );
+        assert_eq!(
+            blocks_mined2.load(Ordering::SeqCst),
+            blocks_processed_before_2
+        );
-    wait_for(30, || {
-        let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap();
-        Ok(tip.sortition)
-    })
-    .expect("Timed out waiting for sortition");
+        // assure we have NO sortition
+        let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+        assert!(!tip.sortition);
+    }
-    let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone();
-    let blocks_before = mined_blocks.load(Ordering::SeqCst);
-    let info_before = signer_test
+    // Verify that no Stacks blocks have been mined (signers are rejecting) and no commits have been submitted by either miner
+    let stacks_height = signer_test
         .stacks_client
         .get_peer_info()
-        .expect("Failed to get peer info");
-    // submit a tx so that the miner will mine a stacks block
-    let mut sender_nonce = 0;
-    let transfer_tx = make_stacks_transfer(
-        &sender_sk,
-        sender_nonce,
-        send_fee,
-        signer_test.running_nodes.conf.burnchain.chain_id,
-        &recipient,
-        send_amt,
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    assert_eq!(stacks_height, stacks_height_before);
+    let stacks_height_before = stacks_height;
+
+    info!("------------------------- Enabling Signer Block Proposals -------------------------";
+        "stacks_height" => stacks_height_before,
     );
-    let tx = submit_tx(&http_origin, &transfer_tx);
-    info!("Submitted tx {tx} in to mine block N");
-    // a tenure has begun, so wait until we mine a block
+    let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    // Allow signers to respond to proposals again
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+
+    info!("------------------------- Wait for Miner B's Block N+1 -------------------------");
+    // wait for the new block to be processed
     wait_for(30, || {
-        Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before)
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
     })
     .expect("Timed out waiting for block to be mined and processed");
-    sender_nonce += 1;
-    let info_after = signer_test
-        .stacks_client
-        .get_peer_info()
-        .expect("Failed to get peer info");
-    assert_eq!(
-        info_before.stacks_tip_height + 1,
-        info_after.stacks_tip_height
+    info!(
+        "------------------------- Verify Tenure Change Tx in Miner B's Block N+1 -------------------------"
     );
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
-    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
-    let block_n = nakamoto_blocks.last().unwrap();
-    assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash);
+    info!("------------------------- Wait for Miner B's Block N+2 -------------------------");
-    info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------");
-    // Propose a valid
block, but force the miner to ignore the returned signatures and delay the block being
-    // broadcasted to the miner so it can end its tenure before block confirmation obtained
-    // Clear the stackerdb chunks
-    info!("Forcing miner to ignore block responses for block N+1");
-    TEST_IGNORE_SIGNERS.lock().unwrap().replace(true);
-    info!("Delaying signer block N+1 broadcasting to the miner");
-    TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true);
-    test_observer::clear();
-    let blocks_before = mined_blocks.load(Ordering::SeqCst);
-    let info_before = signer_test
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
+    let stacks_height_before = signer_test
         .stacks_client
         .get_peer_info()
-        .expect("Failed to get peer info");
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    // submit a tx so that the miner will mine an extra block
     let transfer_tx = make_stacks_transfer(
         &sender_sk,
         sender_nonce,
@@ -4882,197 +6139,97 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
         &recipient,
         send_amt,
     );
-    sender_nonce += 1;
-
-    let tx = submit_tx(&http_origin, &transfer_tx);
+    submit_tx(&http_origin, &transfer_tx);
-    info!("Submitted tx {tx} in to attempt to mine block N+1");
-    let mut block = None;
+    // wait for the new block to be processed
     wait_for(30, || {
-        block = test_observer::get_stackerdb_chunks()
-            .into_iter()
-            .flat_map(|chunk| chunk.modified_slots)
-            .find_map(|chunk| {
-                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
-                    .expect("Failed to deserialize SignerMessage");
-                match message {
-                    SignerMessage::BlockProposal(proposal) => {
-                        if proposal.block.header.consensus_hash
-                            == info_before.stacks_tip_consensus_hash
-                        {
-                            Some(proposal.block)
-                        } else {
-                            None
-                        }
-                    }
-                    _ => None,
-                }
-            });
-        let Some(block) = &block else {
-            return Ok(false);
-        };
-        let signatures = test_observer::get_stackerdb_chunks()
-            .into_iter()
-            .flat_map(|chunk| chunk.modified_slots)
-            .filter_map(|chunk| {
-                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
-                    .expect("Failed to deserialize SignerMessage");
-                match message {
-                    SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => {
-                        if block.header.signer_signature_hash() == accepted.signer_signature_hash {
-                            Some(accepted.signature)
-                        } else {
-                            None
-                        }
-                    }
-                    _ => None,
-                }
-            })
-            .collect::<Vec<_>>();
-        Ok(signatures.len() == num_signers)
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
     })
-    .expect("Test timed out while waiting for signers signatures for first block proposal");
-    let block = block.unwrap();
+    .expect("Timed out waiting for block to be mined and processed");
-    let blocks_after = mined_blocks.load(Ordering::SeqCst);
-    let info_after = signer_test
-        .stacks_client
-        .get_peer_info()
-        .expect("Failed to get peer info");
-    assert_eq!(blocks_after, blocks_before);
-    assert_eq!(info_after, info_before);
-    // Ensure that the block was not yet broadcasted to the miner so the stacks tip has NOT advanced to N+1
-    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
-    let block_n_same = nakamoto_blocks.last().unwrap();
-    assert_ne!(block_n_same, block_n);
-    assert_ne!(info_after.stacks_tip.to_string(),
block_n_same.block_hash);
+    info!("------------------------- Verify Miner B's Block N+2 -------------------------");
-    info!("------------------------- Starting Tenure B -------------------------");
-    let commits_submitted = signer_test.running_nodes.commits_submitted.clone();
-    let commits_before = commits_submitted.load(Ordering::SeqCst);
+    verify_last_block_contains_transfer_tx();
+
+    info!("------------------------- Mine An Empty Sortition -------------------------");
+    let nmb_old_blocks = test_observer::get_blocks().len();
     next_block_and(
         &mut signer_test.running_nodes.btc_regtest_controller,
         60,
         || {
-            let commits_count = commits_submitted.load(Ordering::SeqCst);
-            Ok(commits_count > commits_before)
+            Ok(get_burn_height() > burn_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks)
         },
     )
     .unwrap();
+    btc_blocks_mined += 1;
-    info!(
-        "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------"
-    );
-    // Wait for the miner to propose a new invalid block N+1'
-    let mut rejected_block = None;
-    wait_for(30, || {
-        rejected_block = test_observer::get_stackerdb_chunks()
-            .into_iter()
-            .flat_map(|chunk| chunk.modified_slots)
-            .find_map(|chunk| {
-                let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
-                    .expect("Failed to deserialize SignerMessage");
-                match message {
-                    SignerMessage::BlockProposal(proposal) => {
-                        if proposal.block.header.consensus_hash != block.header.consensus_hash {
-                            assert!(
-                                proposal.block.header.chain_length == block.header.chain_length
-                            );
-                            Some(proposal.block)
-                        } else {
-                            None
-                        }
-                    }
-                    _ => None,
-                }
-            });
-        Ok(rejected_block.is_some())
-    })
-    .expect("Timed out waiting for block proposal of N+1' block proposal");
-    info!("Allowing miner to accept block responses again.
"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); - info!("Allowing signers to broadcast block N+1 to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); - - // Assert the N+1' block was rejected - let rejected_block = rejected_block.unwrap(); - wait_for(30, || { - let stackerdb_events = test_observer::get_stackerdb_chunks(); - let block_rejections = stackerdb_events - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .filter_map(|chunk| { - let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) => { - if rejection.signer_signature_hash - == rejected_block.header.signer_signature_hash() - { - Some(rejection) - } else { - None - } - } - _ => None, - } - }) - .collect::>(); - Ok(block_rejections.len() == num_signers) - }) - .expect("FAIL: Timed out waiting for block proposal rejections"); - - // Induce block N+2 to get mined - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - - let tx = submit_tx(&http_origin, &transfer_tx); - info!("Submitted tx {tx} in to attempt to mine block N+2"); + info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); - info!("------------------------- Asserting a both N+1 and N+2 are accepted -------------------------"); + info!("------------------------- Unpause Miner A's Block Commits -------------------------"); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); wait_for(30, || { - // N.B. 
have to use /v2/info because mined_blocks only increments if the miner's signing
-        // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1)
-        let info = signer_test
-            .stacks_client
-            .get_peer_info()
-            .expect("Failed to get peer info");
-
-        Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height)
+        Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1)
     })
-    .expect("Timed out waiting for blocks to be mined");
-
-    let info_after = signer_test
-        .stacks_client
-        .get_peer_info()
-        .expect("Failed to get peer info");
+    .unwrap();
-    assert_eq!(
-        info_before.stacks_tip_height + 2,
-        info_after.stacks_tip_height
-    );
-    let nmb_signatures = signer_test
-        .stacks_client
-        .get_tenure_tip(&info_after.stacks_tip_consensus_hash)
-        .expect("Failed to get tip")
-        .as_stacks_nakamoto()
-        .expect("Not a Nakamoto block")
-        .signer_signature
-        .len();
-    assert!(nmb_signatures >= num_signers * 7 / 10);
+    info!("------------------------- Run Miner A's Tenure -------------------------");
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let burn_height_before = get_burn_height();
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            Ok(get_burn_height() > burn_height_before
+                && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && test_observer::get_blocks().len() > nmb_old_blocks)
+        },
+    )
+    .unwrap();
+    btc_blocks_mined += 1;
-    // Ensure that the block was accepted globally so the stacks tip has advanced to N+2
-    let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks();
-    let block_n_2 = nakamoto_blocks.last().unwrap();
-    assert_eq!(info_after.stacks_tip.to_string(), block_n_2.block_hash);
-    assert_ne!(block_n_2, block_n);
+    // assure we have a successful sortition that miner A won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    info!("------------------------- Verify Miner A Issued a Tenure Change in Block N+4 -------------------------");
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!(
+        "------------------------- Confirm Burn and Stacks Block Heights -------------------------"
+    );
+    let peer_info = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+
+    assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined);
+    assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 5);
+
+    info!("------------------------- Shutdown -------------------------");
+    rl2_coord_channels
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper_2.store(false, Ordering::SeqCst);
+    run_loop_2_thread.join().unwrap();
+    signer_test.shutdown();
 }
 
 #[test]
@@ -5095,10 +6252,8 @@ fn continue_after_tenure_extend() {
     let recipient = PrincipalData::from(StacksAddress::burn_address(false));
     let send_amt = 100;
     let send_fee = 180;
-    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(
-        num_signers,
-        vec![(sender_addr.clone(), (send_amt + send_fee) * 5)],
-    );
+    let mut signer_test: SignerTest<SpawnedSigner> =
+        SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]);
     let timeout = Duration::from_secs(200);
     let coord_channel = signer_test.running_nodes.coord_channel.clone();
     let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
@@ -5112,10 +6267,7 @@ fn
continue_after_tenure_extend() {
     signer_test
         .running_nodes
         .nakamoto_test_skip_commit_op
-        .0
-        .lock()
-        .unwrap()
-        .replace(true);
+        .set(true);
 
     // It's possible that we have a pending block commit already.
     // Mine two BTC blocks to "flush" this commit.
@@ -5170,17 +6322,16 @@ fn continue_after_tenure_extend() {
     match &parsed.payload {
         TransactionPayload::TenureChange(payload)
             if payload.cause == TenureChangeCause::Extended => {}
-        _ => panic!("Expected tenure extend transaction, got {:?}", parsed),
+        _ => panic!("Expected tenure extend transaction, got {parsed:?}"),
     };
 
     // Verify that the miner can continue mining in the tenure with the tenure extend
     info!("------------------------- Mine After Tenure Extend -------------------------");
-    let mut sender_nonce = 0;
     let mut blocks_processed_before = coord_channel
         .lock()
         .expect("Mutex poisoned")
         .get_stacks_blocks_processed();
-    for _ in 0..5 {
+    for sender_nonce in 0..5 {
         // submit a tx so that the miner will mine an extra block
         let transfer_tx = make_stacks_transfer(
             &sender_sk,
@@ -5190,7 +6341,6 @@ fn continue_after_tenure_extend() {
             &recipient,
             send_amt,
         );
-        sender_nonce += 1;
         submit_tx(&http_origin, &transfer_tx);
 
         info!("Submitted transfer tx and waiting for block proposal");
@@ -5272,13 +6422,13 @@ fn signing_in_0th_tenure_of_reward_cycle() {
     assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle);
 
     for signer in &signer_public_keys {
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 0);
     }
 
     info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------");
     for signer in &signer_public_keys {
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 0);
     }
     let blocks_before = signer_test
@@ -5312,7 +6462,7 @@ fn signing_in_0th_tenure_of_reward_cycle() {
             .unwrap()
         })
         .expect("Unknown signer signature");
-        let blocks_signed = get_v3_signer(&signer, next_reward_cycle);
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
         assert_eq!(blocks_signed, 1);
     }
     assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle);
@@ -5355,7 +6505,7 @@ fn multiple_miners_with_custom_chain_id() {
     let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
         num_signers,
         vec![(
-            sender_addr.clone(),
+            sender_addr,
             (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure,
         )],
         |signer_config| {
@@ -5396,7 +6546,7 @@ fn multiple_miners_with_custom_chain_id() {
                 false
             })
         },
-        Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]),
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
         None,
     );
     let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
@@ -5420,7 +6570,7 @@ fn multiple_miners_with_custom_chain_id() {
     let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
     let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
 
-    conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1");
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
 
     conf_node_2.node.set_bootstrap_nodes(
         format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
@@ -5522,10 +6672,7 @@ fn multiple_miners_with_custom_chain_id() {
                 Ok(blocks_processed > blocks_processed_before)
             })
             .unwrap();
-            info!(
-                "Mined interim block {}:{}",
-                btc_blocks_mined, interim_block_ix
-            );
+            info!("Mined interim block
{btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -5536,7 +6683,7 @@ fn multiple_miners_with_custom_chain_id() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -5558,10 +6705,7 @@ fn multiple_miners_with_custom_chain_id() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -5579,10 +6723,7 @@ fn multiple_miners_with_custom_chain_id() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); // Verify both nodes have the correct chain id let miner1_info = get_chain_info(&signer_test.running_nodes.conf); @@ -5599,3 +6740,300 @@ fn multiple_miners_with_custom_chain_id() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test checks the behavior of the `block_commit_delay_ms` configuration option. +fn block_commit_delay() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |config| { + // Set the block commit delay to 10 minutes to ensure no block commit is sent + config.miner.block_commit_delay = Duration::from_secs(600); + }, + None, + None, + ); + + signer_test.boot_to_epoch_3(); + + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine first block"); + + // Ensure that the block commit has been sent before continuing + wait_for(60, || { + let commits = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + Ok(commits > commits_before) + }) + .expect("Timed out waiting for block commit after new Stacks block"); + + // Prevent a block from being mined by making signers reject it. + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::>(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(all_signers); + + info!("------------------------- Test Mine Burn Block -------------------------"); + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + let commits_before = signer_test + .running_nodes + .commits_submitted + .load(Ordering::SeqCst); + + // Mine a burn block and wait for it to be processed. 
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height;
+            Ok(burn_height > burn_height_before)
+        },
+    )
+    .unwrap();
+
+    // Sleep an extra minute to ensure no block commits are sent
+    sleep_ms(60_000);
+
+    let commits = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+    assert_eq!(commits, commits_before);
+
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+
+    info!("------------------------- Resume Signing -------------------------");
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+
+    // Wait for a block to be mined
+    wait_for(60, || {
+        let blocks = signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst);
+        Ok(blocks > blocks_before)
+    })
+    .expect("Timed out waiting for block to be mined");
+
+    // Wait for a block commit to be sent
+    wait_for(60, || {
+        let commits = signer_test
+            .running_nodes
+            .commits_submitted
+            .load(Ordering::SeqCst);
+        Ok(commits > commits_before)
+    })
+    .expect("Timed out waiting for block commit after new Stacks block");
+
+    signer_test.shutdown();
+}
+
+// Ensures that a signer that successfully submits a block to the node for validation
+// will issue a ConnectivityIssues rejection if the validation response times out.
+// Also ensures that no other proposal gets submitted for validation while we
+// are still waiting for a response to an outstanding block submission.
+#[test]
+#[ignore]
+fn block_validation_response_timeout() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let timeout = Duration::from_secs(30);
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, send_amt + send_fee)],
+        |config| {
+            config.block_proposal_validation_timeout = timeout;
+        },
+        |_| {},
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+    info!("------------------------- Test Block Validation Stalled -------------------------");
+    TEST_VALIDATE_STALL.lock().unwrap().replace(true);
+    let validation_stall_start = Instant::now();
+
+    let proposals_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_proposed
+        .load(Ordering::SeqCst);
+
+    // submit a tx so that the miner will attempt to mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    wait_for(30, || {
+        Ok(signer_test
+            .running_nodes
+            .nakamoto_blocks_proposed
+            .load(Ordering::SeqCst)
+            > proposals_before)
+    })
+    .expect("Timed out waiting for block
proposal"); + + assert!( + validation_stall_start.elapsed() < timeout, + "Test was too slow to propose another block before the timeout" + ); + + info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------"); + let proposal_conf = ProposalEvalConfig { + first_proposal_burn_block_timing: Duration::from_secs(0), + tenure_last_block_proposal_timeout: Duration::from_secs(30), + block_proposal_timeout: Duration::from_secs(100), + }; + let mut block = NakamotoBlock { + header: NakamotoBlockHeader::empty(), + txs: vec![], + }; + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall + let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap(); + block.header.pox_treatment = BitVec::ones(1).unwrap(); + block.header.consensus_hash = view.cur_sortition.consensus_hash; + block.header.chain_length = info_before.stacks_tip_height + 1; + + let block_signer_signature_hash_1 = block.header.signer_signature_hash(); + signer_test.propose_block(block, timeout); + + info!("------------------------- Waiting for Timeout -------------------------"); + // Sleep the necessary timeout to make sure the validation times out. + let elapsed = validation_stall_start.elapsed(); + let wait = timeout.saturating_sub(elapsed); + info!("Sleeping for {} ms", wait.as_millis()); + std::thread::sleep(timeout.saturating_sub(elapsed)); + + info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------"); + // Verify that the signer that submits the block to the node will issue a ConnectivityIssues rejection + wait_for(30, || { + let chunks = test_observer::get_stackerdb_chunks(); + for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + continue; + }; + let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + reason: _reason, + reason_code, + signer_signature_hash, + .. + })) = message + else { + continue; + }; + // We are waiting for the original block proposal which will have a diff signature to our + // second proposed block. 
+            assert_ne!(
+                signer_signature_hash, block_signer_signature_hash_1,
+                "Received a rejection for the wrong block"
+            );
+            if matches!(reason_code, RejectCode::ConnectivityIssues) {
+                return Ok(true);
+            }
+        }
+        Ok(false)
+    })
+    .expect("Timed out waiting for block proposal rejections");
+    // Make sure our chain has still not advanced
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(info_before, info_after);
+    let info_before = info_after;
+    info!("Unpausing block validation");
+    // Disable the stall and wait for the block to be processed successfully
+    TEST_VALIDATE_STALL.lock().unwrap().replace(false);
+    wait_for(30, || {
+        let info = get_chain_info(&signer_test.running_nodes.conf);
+        Ok(info.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .expect("Timed out waiting for block to be processed");
+
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(
+        info_after.stacks_tip_height,
+        info_before.stacks_tip_height + 1,
+    );
+    info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------");
+    let info_before = info_after;
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+
+    wait_for(30, || {
+        let info = get_chain_info(&signer_test.running_nodes.conf);
+        Ok(info.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .unwrap();
+
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(
+        info_after.stacks_tip_height,
+        info_before.stacks_tip_height + 1,
+    );
+}
diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs
index aa620d349b..c68b477b47 100644
--- a/testnet/stacks-node/src/tests/stackerdb.rs
+++ b/testnet/stacks-node/src/tests/stackerdb.rs
@@ -41,14 +41,13 @@ fn post_stackerdb_chunk(
     slot_version: u32,
 ) -> StackerDBChunkAckData {
     let mut chunk = StackerDBChunkData::new(slot_id, slot_version, data);
-    chunk.sign(&signer).unwrap();
+    chunk.sign(signer).unwrap();
 
     let chunk_body = serde_json::to_string(&chunk).unwrap();
 
     let client = reqwest::blocking::Client::new();
     let path = format!(
-        "{}/v2/stackerdb/{}/{}/chunks",
-        http_origin,
+        "{http_origin}/v2/stackerdb/{}/{}/chunks",
         &StacksAddress::from(stackerdb_contract_id.issuer.clone()),
         stackerdb_contract_id.name
     );
@@ -60,8 +59,8 @@ fn post_stackerdb_chunk(
         .unwrap();
     if res.status().is_success() {
         let ack: StackerDBChunkAckData = res.json().unwrap();
-        info!("Got stackerdb ack: {:?}", &ack);
-        return ack;
+        info!("Got stackerdb ack: {ack:?}");
+        ack
     } else {
         eprintln!("StackerDB post error: {}", res.text().unwrap());
         panic!("");
@@ -76,20 +75,15 @@ fn get_stackerdb_chunk(
 ) -> Vec<u8> {
     let path = if let Some(version) = slot_version {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}/{version}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
             stackerdb_contract_id.name,
-            slot_id,
-            version
         )
     } else {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
-            stackerdb_contract_id.name,
-            slot_id
+            stackerdb_contract_id.name
         )
     };
 
     let client = reqwest::blocking::Client::new();
     let res = client.get(&path).send().unwrap();
 
     if res.status().is_success() {
-        let chunk_data: Vec<u8> = res.bytes().unwrap().to_vec();
-        return chunk_data;
+        res.bytes().unwrap().to_vec()
     } else {
         eprintln!("Get chunk error: {}", res.text().unwrap());
         panic!("");
@@ -115,7 +108,7 @@ fn
test_stackerdb_load_store() { let (mut conf, _) = neon_integration_test_conf(); test_observer::register_any(&mut conf); - let privks = vec![ + let privks = [ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R StacksPrivateKey::from_hex( "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", @@ -223,18 +216,18 @@ fn test_stackerdb_load_store() { // write some chunks and read them back for i in 0..3 { - let chunk_str = format!("Hello chunks {}", &i); + let chunk_str = format!("Hello chunks {i}"); let ack = post_stackerdb_chunk( &http_origin, &contract_id, chunk_str.as_bytes().to_vec(), &privks[0], 0, - (i + 1) as u32, + i + 1, ); - debug!("ACK: {:?}", &ack); + debug!("ACK: {ack:?}"); - let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some((i + 1) as u32)); + let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some(i + 1)); assert_eq!(data, chunk_str.as_bytes().to_vec()); let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, None); @@ -252,7 +245,7 @@ fn test_stackerdb_event_observer() { let (mut conf, _) = neon_integration_test_conf(); test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]); - let privks = vec![ + let privks = [ // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R StacksPrivateKey::from_hex( "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001", @@ -362,7 +355,7 @@ fn test_stackerdb_event_observer() { for i in 0..6 { let slot_id = i as u32; let privk = &privks[i / 3]; - let chunk_str = format!("Hello chunks {}", &i); + let chunk_str = format!("Hello chunks {i}"); let ack = post_stackerdb_chunk( &http_origin, &contract_id, @@ -371,7 +364,7 @@ fn test_stackerdb_event_observer() { slot_id, 1, ); - debug!("ACK: {:?}", &ack); + debug!("ACK: {ack:?}"); let data = get_stackerdb_chunk(&http_origin, &contract_id, slot_id, Some(1)); assert_eq!(data, chunk_str.as_bytes().to_vec()); @@ -383,11 +376,10 @@ fn test_stackerdb_event_observer() { // get events, verifying that they're all for the same contract (i.e. this one) let stackerdb_events: Vec<_> = test_observer::get_stackerdb_chunks() .into_iter() - .map(|stackerdb_event| { + .flat_map(|stackerdb_event| { assert_eq!(stackerdb_event.contract_id, contract_id); stackerdb_event.modified_slots }) - .flatten() .collect(); assert_eq!(stackerdb_events.len(), 6); @@ -396,7 +388,7 @@ fn test_stackerdb_event_observer() { assert_eq!(i as u32, event.slot_id); assert_eq!(event.slot_version, 1); - let expected_data = format!("Hello chunks {}", &i); + let expected_data = format!("Hello chunks {i}"); let expected_hash = Sha512Trunc256Sum::from_data(expected_data.as_bytes()); assert_eq!(event.data, expected_data.as_bytes().to_vec());