diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 41e14b4458..2d493753e5 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -122,6 +122,9 @@ jobs: - tests::signer::v0::signer_set_rollover - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend + - tests::signer::v0::tenure_extend_after_idle + - tests::signer::v0::stx_transfers_dont_effect_idle_timeout + - tests::signer::v0::idle_tenure_extend_active_mining - tests::signer::v0::multiple_miners_with_custom_chain_id - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 1cff959695..5c615f46fa 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -353,7 +353,7 @@ mod test { ) -> std::result::Result { self.invoked_functions.push((cost_f, input.to_vec())); self.invocation_count += 1; - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { self.cost_addition_count += 1; diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 430d707b02..2113ec6479 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -140,7 +140,7 @@ impl CostTracker for () { _cost_function: ClarityCostFunction, _input: &[u64], ) -> std::result::Result { - Ok(ExecutionCost::zero()) + Ok(ExecutionCost::ZERO) } fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { Ok(()) @@ -707,7 +707,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -731,7 +731,7 @@ impl LimitedCostTracker { contract_call_circuits: HashMap::new(), limit, memory_limit: CLARITY_MEMORY_LIMIT, - total: 
ExecutionCost::zero(), + total: ExecutionCost::ZERO, memory: 0, epoch, mainnet, @@ -880,7 +880,7 @@ impl LimitedCostTracker { pub fn get_total(&self) -> ExecutionCost { match self { Self::Limited(TrackerData { total, .. }) => total.clone(), - Self::Free => ExecutionCost::zero(), + Self::Free => ExecutionCost::ZERO, } } #[allow(clippy::panic)] @@ -1050,7 +1050,7 @@ impl CostTracker for LimitedCostTracker { match self { Self::Free => { // tracker is free, return zero! - return Ok(ExecutionCost::zero()); + return Ok(ExecutionCost::ZERO); } Self::Limited(ref mut data) => { if cost_function == ClarityCostFunction::Unimplemented { @@ -1195,15 +1195,13 @@ impl CostOverflowingMath for u64 { } impl ExecutionCost { - pub fn zero() -> ExecutionCost { - Self { - runtime: 0, - write_length: 0, - read_count: 0, - write_count: 0, - read_length: 0, - } - } + pub const ZERO: Self = Self { + runtime: 0, + write_length: 0, + read_count: 0, + write_count: 0, + read_length: 0, + }; /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension. 
pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { @@ -1328,6 +1326,10 @@ impl ExecutionCost { read_length: first.read_length.max(second.read_length), } } + + pub fn is_zero(&self) -> bool { + *self == Self::ZERO + } } // ONLY WORKS IF INPUT IS u64 diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 618aa20937..087c4ba7a3 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -73,6 +73,9 @@ use crate::{ SignerMessage as SignerMessageTrait, VERSION_STRING, }; +/// Maximum size of the [BlockResponseData] serialized bytes +pub const BLOCK_RESPONSE_DATA_MAX_SIZE: u32 = 2 * 1024 * 1024; // 2MB + define_u8_enum!( /// Enum representing the stackerdb message identifier: this is /// the contract index in the signers contracts (i.e., X in signers-0-X) @@ -638,11 +641,16 @@ impl std::fmt::Display for BlockResponse { impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: MessageSignature) -> Self { + pub fn accepted( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self::Accepted(BlockAccepted { - signer_signature_hash: hash, - signature: sig, + signer_signature_hash, + signature, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(tenure_extend_timestamp), }) } @@ -652,8 +660,31 @@ impl BlockResponse { reject_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { - Self::Rejected(BlockRejection::new(hash, reject_code, private_key, mainnet)) + Self::Rejected(BlockRejection::new( + hash, + reject_code, + private_key, + mainnet, + timestamp, + )) + } + + /// Get the tenure extend timestamp from the block response + pub fn get_tenure_extend_timestamp(&self) -> u64 { + match self { + BlockResponse::Accepted(accepted) => 
accepted.response_data.tenure_extend_timestamp, + BlockResponse::Rejected(rejection) => rejection.response_data.tenure_extend_timestamp, + } + } + + /// Get the signer signature hash from the block response + pub fn get_signer_signature_hash(&self) -> Sha512Trunc256Sum { + match self { + BlockResponse::Accepted(accepted) => accepted.signer_signature_hash, + BlockResponse::Rejected(rejection) => rejection.signer_signature_hash, + } } } @@ -739,6 +770,79 @@ impl SignerMessageMetadata { } } +/// The latest version of the block response data +pub const BLOCK_RESPONSE_DATA_VERSION: u8 = 2; + +/// Versioned, backwards-compatible struct for block response data +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BlockResponseData { + /// The version of the block response data + pub version: u8, + /// The block response data + pub tenure_extend_timestamp: u64, + /// When deserializing future versions, + /// there may be extra bytes that we don't know about + pub unknown_bytes: Vec, +} + +impl BlockResponseData { + /// Create a new BlockResponseData for the provided tenure extend timestamp and unknown bytes + pub fn new(tenure_extend_timestamp: u64) -> Self { + Self { + version: BLOCK_RESPONSE_DATA_VERSION, + tenure_extend_timestamp, + unknown_bytes: vec![], + } + } + + /// Create an empty BlockResponseData + pub fn empty() -> Self { + Self::new(u64::MAX) + } + + /// Serialize the "inner" block response data. Used to determine the bytes length of the serialized block response data + fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + // write_next(fd, &self.unknown_bytes)?; + fd.write_all(&self.unknown_bytes) + .map_err(CodecError::WriteError)?; + Ok(()) + } +} + +impl StacksMessageCodec for BlockResponseData { + /// Serialize the block response data. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. 
When serializing, we use + /// `bytes_len` to ensure that older versions of the code can read through the + /// end of the serialized bytes. + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + write_next(fd, &inner_bytes)?; + Ok(()) + } + + /// Deserialize the block response data in a backwards-compatible manner. + /// When creating a new version of the block response data, we are only ever + /// appending new bytes to the end of the struct. When deserializing, we use + /// `bytes_len` to ensure that we read through the end of the serialized bytes. + fn consensus_deserialize(fd: &mut R) -> Result { + let Ok(version) = read_next(fd) else { + return Ok(Self::empty()); + }; + let inner_bytes: Vec = read_next_at_most(fd, BLOCK_RESPONSE_DATA_MAX_SIZE)?; + let mut inner_reader = inner_bytes.as_slice(); + let tenure_extend_timestamp = read_next(&mut inner_reader)?; + Ok(Self { + version, + tenure_extend_timestamp, + unknown_bytes: inner_reader.to_vec(), + }) + } +} + /// A rejection response from a signer for a proposed block #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BlockAccepted { @@ -748,6 +852,8 @@ pub struct BlockAccepted { pub signature: MessageSignature, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl StacksMessageCodec for BlockAccepted { @@ -755,6 +861,7 @@ impl StacksMessageCodec for BlockAccepted { write_next(fd, &self.signer_signature_hash)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -762,21 +869,28 @@ impl StacksMessageCodec for BlockAccepted { let signer_signature_hash = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = read_next::(fd)?; + let response_data = read_next::(fd)?; Ok(Self { 
signer_signature_hash, signature, metadata, + response_data, }) } } impl BlockAccepted { /// Create a new BlockAccepted for the provided block signer signature hash and signature - pub fn new(signer_signature_hash: Sha512Trunc256Sum, signature: MessageSignature) -> Self { + pub fn new( + signer_signature_hash: Sha512Trunc256Sum, + signature: MessageSignature, + tenure_extend_timestamp: u64, + ) -> Self { Self { signer_signature_hash, signature, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(tenure_extend_timestamp), } } } @@ -796,6 +910,8 @@ pub struct BlockRejection { pub chain_id: u32, /// Signer message metadata pub metadata: SignerMessageMetadata, + /// Extra versioned block response data + pub response_data: BlockResponseData, } impl BlockRejection { @@ -805,6 +921,7 @@ impl BlockRejection { reason_code: RejectCode, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -818,6 +935,7 @@ impl BlockRejection { signature: MessageSignature::empty(), chain_id, metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -830,6 +948,7 @@ impl BlockRejection { reject: BlockValidateReject, private_key: &StacksPrivateKey, mainnet: bool, + timestamp: u64, ) -> Self { let chain_id = if mainnet { CHAIN_ID_MAINNET @@ -843,6 +962,7 @@ impl BlockRejection { chain_id, signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(timestamp), }; rejection .sign(private_key) @@ -893,6 +1013,7 @@ impl StacksMessageCodec for BlockRejection { write_next(fd, &self.chain_id)?; write_next(fd, &self.signature)?; write_next(fd, &self.metadata)?; + write_next(fd, &self.response_data)?; Ok(()) } @@ -906,6 +1027,7 @@ impl StacksMessageCodec for BlockRejection { let chain_id = read_next::(fd)?; let signature = read_next::(fd)?; let metadata = 
read_next::(fd)?; + let response_data = read_next::(fd)?; Ok(Self { reason, reason_code, @@ -913,6 +1035,7 @@ impl StacksMessageCodec for BlockRejection { chain_id, signature, metadata, + response_data, }) } } @@ -1046,6 +1169,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1057,6 +1181,7 @@ mod test { RejectCode::ConnectivityIssues, &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), ); let serialized_rejection = rejection.serialize_to_vec(); let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) @@ -1070,6 +1195,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([0u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let response = BlockResponse::Accepted(accepted); let serialized_response = response.serialize_to_vec(); @@ -1082,6 +1208,7 @@ mod test { RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), &StacksPrivateKey::new(), thread_rng().gen_bool(0.5), + thread_rng().next_u64(), )); let serialized_response = response.serialize_to_vec(); let deserialized_response = read_next::(&mut &serialized_response[..]) @@ -1095,6 +1222,7 @@ mod test { signer_signature_hash: Sha512Trunc256Sum([2u8; 32]), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::default(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)); let serialized_signer_message = signer_message.serialize_to_vec(); @@ -1241,9 +1369,9 @@ mod test { #[test] fn test_backwards_compatibility() { let block_rejected_hex = 
"010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1258,6 +1386,7 @@ mod test { chain_id: CHAIN_ID_TESTNET, signature: MessageSignature::from_hex("006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df3").unwrap(), metadata: SignerMessageMetadata::empty(), + response_data: BlockResponseData::new(u64::MAX) })) ); @@ -1270,6 +1399,7 @@ mod test { .unwrap(), metadata: SignerMessageMetadata::empty(), signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::new(u64::MAX) })) ); } @@ -1277,9 +1407,9 @@ mod test { #[test] fn test_block_response_metadata() { let block_rejected_hex = 
"010100000050426c6f636b206973206e6f7420612074656e7572652d737461727420626c6f636b2c20616e642068617320616e20756e7265636f676e697a65642074656e75726520636f6e73656e7375732068617368000691f95f84b7045f7dce7757052caa986ef042cb58f7df5031a3b5b5d0e3dda63e80000000006fb349212e1a1af1a3c712878d5159b5ec14636adb6f70be00a6da4ad4f88a9934d8a9abb229620dd8e0f225d63401e36c64817fb29e6c05591dcbe95c512df30000000b48656c6c6f20776f726c64"; - let block_rejected_bytes = hex_bytes(&block_rejected_hex).unwrap(); + let block_rejected_bytes = hex_bytes(block_rejected_hex).unwrap(); let block_accepted_hex = "010011717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e80000000b48656c6c6f20776f726c64"; - let block_accepted_bytes = hex_bytes(&block_accepted_hex).unwrap(); + let block_accepted_bytes = hex_bytes(block_accepted_hex).unwrap(); let block_rejected = read_next::(&mut &block_rejected_bytes[..]) .expect("Failed to deserialize BlockRejection"); let block_accepted = read_next::(&mut &block_accepted_bytes[..]) @@ -1296,6 +1426,7 @@ mod test { metadata: SignerMessageMetadata { server_version: "Hello world".to_string(), }, + response_data: BlockResponseData::new(u64::MAX), })) ); @@ -1310,6 +1441,7 @@ mod test { server_version: "Hello world".to_string(), }, signature: MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::empty(), })) ); } @@ -1322,4 +1454,137 @@ mod test { .expect("Failed to deserialize SignerMessageMetadata"); assert_eq!(deserialized_metadata, SignerMessageMetadata::empty()); } + + #[test] + fn block_response_data_serialization() { + let mut response_data = BlockResponseData::new(2); + response_data.unknown_bytes = vec![1, 2, 3, 4]; + let mut bytes = vec![]; + response_data.consensus_serialize(&mut 
bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 4 bytes unknown_bytes + assert_eq!(bytes.len(), 17); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + + let response_data = BlockResponseData::new(2); + let mut bytes = vec![]; + response_data.consensus_serialize(&mut bytes).unwrap(); + // 1 byte version + 4 bytes (bytes_len) + 8 bytes tenure_extend_timestamp + 0 bytes unknown_bytes + assert_eq!(bytes.len(), 13); + let deserialized_data = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(response_data, deserialized_data); + } + + /// Mock struct for testing "future proofing" of the block response data + pub struct NewerBlockResponseData { + pub version: u8, + pub tenure_extend_timestamp: u64, + pub some_other_field: u64, + pub yet_another_field: u64, + } + + impl NewerBlockResponseData { + pub fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.tenure_extend_timestamp)?; + write_next(fd, &self.some_other_field)?; + write_next(fd, &self.yet_another_field)?; + Ok(()) + } + + pub fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.version)?; + let mut inner_bytes = vec![]; + self.inner_consensus_serialize(&mut inner_bytes)?; + let bytes_len = inner_bytes.len() as u32; + write_next(fd, &bytes_len)?; + fd.write_all(&inner_bytes).map_err(CodecError::WriteError)?; + Ok(()) + } + } + + #[test] + fn test_newer_block_response_data() { + let new_response_data = NewerBlockResponseData { + version: 11, + tenure_extend_timestamp: 2, + some_other_field: 3, + yet_another_field: 4, + }; + + let mut bytes = vec![]; + new_response_data.consensus_serialize(&mut bytes).unwrap(); + let mut reader = bytes.as_slice(); + let deserialized_data = read_next::(&mut reader) + .expect("Failed to deserialize 
BlockResponseData"); + assert_eq!(reader.len(), 0, "Expected bytes to be fully consumed"); + assert_eq!(deserialized_data.version, 11); + assert_eq!(deserialized_data.tenure_extend_timestamp, 2); + // two extra u64s: + assert_eq!(deserialized_data.unknown_bytes.len(), 16); + + // BlockResponseData with unknown bytes can serialize/deserialize back to itself + let mut bytes = vec![]; + deserialized_data.consensus_serialize(&mut bytes).unwrap(); + let deserialized_data_2 = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockResponseData"); + assert_eq!(deserialized_data, deserialized_data_2); + } + + /// Test using an older version of BlockAccepted to verify that we can deserialize + /// future versions + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + pub struct BlockAcceptedOld { + /// The signer signature hash of the block that was accepted + pub signer_signature_hash: Sha512Trunc256Sum, + /// The signer's signature across the acceptance + pub signature: MessageSignature, + /// Signer message metadata + pub metadata: SignerMessageMetadata, + } + + impl StacksMessageCodec for BlockAcceptedOld { + fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { + write_next(fd, &self.signer_signature_hash)?; + write_next(fd, &self.signature)?; + write_next(fd, &self.metadata)?; + Ok(()) + } + + fn consensus_deserialize(fd: &mut R) -> Result { + let signer_signature_hash = read_next::(fd)?; + let signature = read_next::(fd)?; + let metadata = read_next::(fd)?; + Ok(Self { + signer_signature_hash, + signature, + metadata, + }) + } + } + + #[test] + fn block_accepted_old_version_can_deserialize() { + let block_accepted = BlockAccepted { + signer_signature_hash: Sha512Trunc256Sum::from_hex("11717149677c2ac97d15ae5954f7a716f10100b9cb81a2bf27551b2f2e54ef19").unwrap(), + metadata: SignerMessageMetadata::default(), + signature: 
MessageSignature::from_hex("001c694f8134c5c90f2f2bcd330e9f423204884f001b5df0050f36a2c4ff79dd93522bb2ae395ea87de4964886447507c18374b7a46ee2e371e9bf332f0706a3e8").unwrap(), + response_data: BlockResponseData::new(u64::MAX) + }; + + let mut bytes = vec![]; + block_accepted.consensus_serialize(&mut bytes).unwrap(); + + // Ensure the old version can deserialize + let block_accepted_old = read_next::(&mut &bytes[..]) + .expect("Failed to deserialize BlockAcceptedOld"); + assert_eq!( + block_accepted.signer_signature_hash, + block_accepted_old.signer_signature_hash + ); + assert_eq!(block_accepted.signature, block_accepted_old.signature); + assert_eq!(block_accepted.metadata, block_accepted_old.metadata); + } } diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index a332b344ce..2fec112d1f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,6 +11,9 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Added tenure extend timestamp to signer block responses +- Added tenure_idle_timeout_secs configuration option for determining when a time-based tenure extend will be accepted + ## [3.0.0.0.4.0] ### Added diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 139c34fba8..3beba641f2 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -45,7 +45,7 @@ tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } rand = { workspace = true } url = "2.1.0" -rusqlite = { workspace = true } +rusqlite = { workspace = true, features = ["functions"] } [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 9fb5c496c8..f2f042dffb 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -122,6 +122,8 @@ pub struct ProposalEvalConfig { /// Time to wait for the last block of a tenure to be globally 
accepted or rejected before considering /// a new miner's block at the same height as valid. pub tenure_last_block_proposal_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -130,6 +132,7 @@ impl From<&SignerConfig> for ProposalEvalConfig { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, + tenure_idle_timeout: value.tenure_idle_timeout, } } } @@ -190,7 +193,6 @@ impl SortitionsView { signer_db: &mut SignerDb, block: &NakamotoBlock, block_pk: &StacksPublicKey, - reward_cycle: u64, reset_view_if_wrong_consensus_hash: bool, ) -> Result { if self @@ -284,14 +286,7 @@ impl SortitionsView { "last_sortition_consensus_hash" => ?self.last_sortition.as_ref().map(|x| x.consensus_hash), ); self.reset_view(client)?; - return self.check_proposal( - client, - signer_db, - block, - block_pk, - reward_cycle, - false, - ); + return self.check_proposal(client, signer_db, block, block_pk, false); } warn!( "Miner block proposal has consensus hash that is neither the current or last sortition. Considering invalid."; @@ -349,7 +344,6 @@ impl SortitionsView { &proposed_by, tenure_change, block, - reward_cycle, signer_db, client, )? 
{ @@ -368,14 +362,23 @@ impl SortitionsView { // in tenure extends, we need to check: // (1) if this is the most recent sortition, an extend is allowed if it changes the burnchain view // (2) if this is the most recent sortition, an extend is allowed if enough time has passed to refresh the block limit + let sortition_consensus_hash = proposed_by.state().consensus_hash; let changed_burn_view = - tenure_extend.burn_view_consensus_hash != proposed_by.state().consensus_hash; - let enough_time_passed = Self::tenure_time_passed_block_lim()?; + tenure_extend.burn_view_consensus_hash != sortition_consensus_hash; + let extend_timestamp = signer_db.calculate_tenure_extend_timestamp( + self.config.tenure_idle_timeout, + &block, + false, + ); + let epoch_time = get_epoch_time_secs(); + let enough_time_passed = epoch_time > extend_timestamp; if !changed_burn_view && !enough_time_passed { warn!( "Miner block proposal contains a tenure extend, but the burnchain view has not changed and enough time has not passed to refresh the block limit. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "extend_timestamp" => extend_timestamp, + "epoch_time" => epoch_time, ); return Ok(false); } @@ -539,7 +542,6 @@ impl SortitionsView { fn check_tenure_change_confirms_parent( tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, tenure_last_block_proposal_timeout: Duration, @@ -584,7 +586,7 @@ impl SortitionsView { // If we have seen this block already, make sure its state is updated to globally accepted. // Otherwise, don't worry about it. 
if let Ok(Some(mut block_info)) = - signer_db.block_lookup(reward_cycle, &nakamoto_tip.signer_signature_hash()) + signer_db.block_lookup(&nakamoto_tip.signer_signature_hash()) { if block_info.state != BlockState::GloballyAccepted { if let Err(e) = block_info.mark_globally_accepted() { @@ -619,7 +621,6 @@ impl SortitionsView { proposed_by: &ProposedBy, tenure_change: &TenureChangePayload, block: &NakamotoBlock, - reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, ) -> Result { @@ -627,7 +628,6 @@ impl SortitionsView { let confirms_expected_parent = Self::check_tenure_change_confirms_parent( tenure_change, block, - reward_cycle, signer_db, client, self.config.tenure_last_block_proposal_timeout, @@ -691,12 +691,6 @@ impl SortitionsView { } } - /// Has the current tenure lasted long enough to extend the block limit? - pub fn tenure_time_passed_block_lim() -> Result { - // TODO - Ok(false) - } - /// Fetch a new view of the recent sortitions pub fn fetch_view( config: ProposalEvalConfig, diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 37706368dc..ba55bd9810 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -413,6 +413,7 @@ pub(crate) mod tests { block_proposal_timeout: config.block_proposal_timeout, tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: config.block_proposal_validation_timeout, + tenure_idle_timeout: config.tenure_idle_timeout, } } diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 117dd4814f..934686d1c2 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -236,7 +236,8 @@ mod tests { use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use libsigner::v0::messages::{ - BlockRejection, BlockResponse, RejectCode, SignerMessage, SignerMessageMetadata, + BlockRejection, 
BlockResponse, BlockResponseData, RejectCode, SignerMessage, + SignerMessageMetadata, }; use rand::{thread_rng, RngCore}; @@ -286,6 +287,7 @@ mod tests { chain_id: thread_rng().next_u32(), signature: MessageSignature::empty(), metadata: SignerMessageMetadata::empty(), + response_data: BlockResponseData::new(thread_rng().next_u64()), }; let signer_message = SignerMessage::BlockResponse(BlockResponse::Rejected(block_reject)); let ack = StackerDBChunkAckData { diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 57c90ab0eb..18412bc5f2 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -38,6 +38,7 @@ const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; +const TENURE_IDLE_TIMEOUT_SECS: u64 = 300; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -135,6 +136,8 @@ pub struct SignerConfig { pub tenure_last_block_proposal_timeout: Duration, /// How much time to wait for a block proposal validation response before marking the block invalid pub block_proposal_validation_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } /// The parsed configuration for the signer @@ -171,6 +174,8 @@ pub struct GlobalConfig { /// How long to wait for a response from a block proposal validation response from the node /// before marking that block as invalid and rejecting it pub block_proposal_validation_timeout: Duration, + /// How much idle time must pass before allowing a tenure extend + pub tenure_idle_timeout: Duration, } /// Internal struct for loading up the config file @@ -206,6 +211,8 @@ struct RawConfigFile { /// How long to wait (in millisecs) for a response from a block proposal validation response from the node /// before marking that 
block as invalid and rejecting it pub block_proposal_validation_timeout_ms: Option, + /// How much idle time (in seconds) must pass before a tenure extend is allowed + pub tenure_idle_timeout_secs: Option, } impl RawConfigFile { @@ -297,6 +304,12 @@ impl TryFrom for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), ); + let tenure_idle_timeout = Duration::from_secs( + raw_data + .tenure_idle_timeout_secs + .unwrap_or(TENURE_IDLE_TIMEOUT_SECS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -312,6 +325,7 @@ impl TryFrom for GlobalConfig { chain_id: raw_data.chain_id, tenure_last_block_proposal_timeout, block_proposal_validation_timeout, + tenure_idle_timeout, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index c8f6041478..11faadf871 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -285,6 +285,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo block_proposal_timeout: self.config.block_proposal_timeout, tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, + tenure_idle_timeout: self.config.tenure_idle_timeout, })) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index c0bd679a54..9c4c348f8e 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -16,15 +16,17 @@ use std::fmt::Display; use std::path::Path; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; +use blockstack_lib::chainstate::stacks::TransactionPayload; use blockstack_lib::util_lib::db::{ query_row, query_rows, sqlite_open, table_exists, tx_begin_immediate, u64_to_sql, Error as DBError, }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use libsigner::BlockProposal; +use rusqlite::functions::FunctionFlags; use rusqlite::{ params, Connection, 
Error as SqliteError, OpenFlags, OptionalExtension, Transaction, }; @@ -157,6 +159,8 @@ pub struct BlockInfo { pub signed_group: Option, /// The block state relative to the signer's view of the stacks blockchain pub state: BlockState, + /// Consumed processing time in milliseconds to validate this block + pub validation_time_ms: Option, /// Extra data specific to v0, v1, etc. pub ext: ExtraBlockInfo, } @@ -175,10 +179,20 @@ impl From for BlockInfo { signed_group: None, ext: ExtraBlockInfo::default(), state: BlockState::Unprocessed, + validation_time_ms: None, } } } impl BlockInfo { + /// Whether the block is a tenure change block or not + pub fn is_tenure_change(&self) -> bool { + self.block + .txs + .first() + .map(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))) + .unwrap_or(false) + } + /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -329,6 +343,13 @@ CREATE INDEX IF NOT EXISTS blocks_state ON blocks ((json_extract(block_info, '$. CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks ((json_extract(block_info, '$.signed_group'))); "#; +static CREATE_INDEXES_5: &str = r#" +CREATE INDEX IF NOT EXISTS blocks_signed_over ON blocks (consensus_hash, signed_over); +CREATE INDEX IF NOT EXISTS blocks_consensus_hash_state ON blocks (consensus_hash, state); +CREATE INDEX IF NOT EXISTS blocks_state ON blocks (state); +CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks (signed_group); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -373,7 +394,7 @@ CREATE TABLE IF NOT EXISTS block_signatures ( -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, -- and stacks forks. 
signer_signature_hash TEXT NOT NULL, - -- signtaure itself + -- signature itself signature TEXT NOT NULL, PRIMARY KEY (signature) ) STRICT;"#; @@ -390,6 +411,69 @@ CREATE TABLE IF NOT EXISTS block_rejection_signer_addrs ( PRIMARY KEY (signer_addr) ) STRICT;"#; +// Migration logic necessary to move blocks from the old blocks table to the new blocks table +static MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3: &str = r#" +CREATE TABLE IF NOT EXISTS temp_blocks ( + -- The block sighash commits to all of the stacks and burnchain state as of its parent, + -- as well as the tenure itself so there's no need to include the reward cycle. Just + -- the sighash is sufficient to uniquely identify the block across all burnchain, PoX, + -- and stacks forks. + signer_signature_hash TEXT NOT NULL PRIMARY KEY, + reward_cycle INTEGER NOT NULL, + block_info TEXT NOT NULL, + consensus_hash TEXT NOT NULL, + signed_over INTEGER NOT NULL, + broadcasted INTEGER, + stacks_height INTEGER NOT NULL, + burn_block_height INTEGER NOT NULL, + valid INTEGER, + state TEXT NOT NULL, + signed_group INTEGER, + signed_self INTEGER, + proposed_time INTEGER NOT NULL, + validation_time_ms INTEGER, + tenure_change INTEGER NOT NULL +) STRICT; + +INSERT INTO temp_blocks ( + signer_signature_hash, + reward_cycle, + block_info, + consensus_hash, + signed_over, + broadcasted, + stacks_height, + burn_block_height, + valid, + state, + signed_group, + signed_self, + proposed_time, + validation_time_ms, + tenure_change +) +SELECT + signer_signature_hash, + reward_cycle, + block_info, + consensus_hash, + signed_over, + broadcasted, + stacks_height, + burn_block_height, + json_extract(block_info, '$.valid') AS valid, + json_extract(block_info, '$.state') AS state, + json_extract(block_info, '$.signed_group') AS signed_group, + json_extract(block_info, '$.signed_self') AS signed_self, + json_extract(block_info, '$.proposed_time') AS proposed_time, + json_extract(block_info, '$.validation_time_ms') AS validation_time_ms, + 
is_tenure_change(block_info) AS tenure_change +FROM blocks; + +DROP TABLE blocks; + +ALTER TABLE temp_blocks RENAME TO blocks;"#; + static SCHEMA_1: &[&str] = &[ DROP_SCHEMA_0, CREATE_DB_CONFIG, @@ -431,9 +515,16 @@ static SCHEMA_4: &[&str] = &[ "INSERT OR REPLACE INTO db_config (version) VALUES (4);", ]; +static SCHEMA_5: &[&str] = &[ + MIGRATE_BLOCKS_TABLE_2_BLOCKS_TABLE_3, + CREATE_INDEXES_5, + "DELETE FROM db_config;", // Be extra careful. Make sure there is only ever one row in the table. + "INSERT INTO db_config (version) VALUES (5);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 4; + pub const SCHEMA_VERSION: u32 = 5; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -519,10 +610,49 @@ impl SignerDb { Ok(()) } + /// Migrate from schema 4 to schema 5 + fn schema_5_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)? 
>= 5 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_5.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + + /// Register custom scalar functions used by the database + fn register_scalar_functions(&self) -> Result<(), DBError> { + // Register helper function for determining if a block is a tenure change transaction + // Required only for data migration from Schema 4 to Schema 5 + self.db.create_scalar_function( + "is_tenure_change", + 1, + FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC, + |ctx| { + let value = ctx.get::(0)?; + let block_info = serde_json::from_str::(&value) + .map_err(|e| SqliteError::UserFunctionError(e.into()))?; + Ok(block_info.is_tenure_change()) + }, + )?; + Ok(()) + } + + /// Drop registered scalar functions used only for data migrations + fn remove_scalar_functions(&self) -> Result<(), DBError> { + self.db.remove_function("is_tenure_change", 1)?; + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB, the DB will be dropped). fn create_or_migrate(&mut self) -> Result<(), DBError> { + self.register_scalar_functions()?; let sql_tx = tx_begin_immediate(&mut self.db)?; loop { let version = Self::get_schema_version(&sql_tx)?; @@ -531,7 +661,8 @@ impl SignerDb { 1 => Self::schema_2_migration(&sql_tx)?, 2 => Self::schema_3_migration(&sql_tx)?, 3 => Self::schema_4_migration(&sql_tx)?, - 4 => break, + 4 => Self::schema_5_migration(&sql_tx)?, + 5 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. 
Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -539,6 +670,7 @@ impl SignerDb { } } sql_tx.commit()?; + self.remove_scalar_functions()?; Ok(()) } @@ -577,15 +709,11 @@ impl SignerDb { /// Fetch a block from the database using the block's /// `signer_signature_hash` - pub fn block_lookup( - &self, - reward_cycle: u64, - hash: &Sha512Trunc256Sum, - ) -> Result, DBError> { + pub fn block_lookup(&self, hash: &Sha512Trunc256Sum) -> Result, DBError> { let result: Option = query_row( &self.db, - "SELECT block_info FROM blocks WHERE reward_cycle = ? AND signer_signature_hash = ?", - params![u64_to_sql(reward_cycle)?, hash.to_string()], + "SELECT block_info FROM blocks WHERE signer_signature_hash = ?", + params![hash.to_string()], )?; try_deserialize(result) @@ -618,7 +746,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state IN (?2, ?3) ORDER BY stacks_height DESC LIMIT 1"; let args = params![ tenure, &BlockState::GloballyAccepted.to_string(), @@ -634,7 +762,7 @@ impl SignerDb { &self, tenure: &ConsensusHash, ) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND json_extract(block_info, '$.state') = ?2 ORDER BY stacks_height DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC LIMIT 1"; let args = params![tenure, &BlockState::GloballyAccepted.to_string()]; let result: Option = query_row(&self.db, query, args)?; @@ -643,7 +771,7 @@ impl SignerDb { /// Return the canonical tip -- the last globally accepted block. 
pub fn get_canonical_tip(&self) -> Result, DBError> { - let query = "SELECT block_info FROM blocks WHERE json_extract(block_info, '$.state') = ?1 ORDER BY stacks_height DESC, json_extract(block_info, '$.signed_group') DESC LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE state = ?1 ORDER BY stacks_height DESC, signed_group DESC LIMIT 1"; let args = params![&BlockState::GloballyAccepted.to_string()]; let result: Option = query_row(&self.db, query, args)?; @@ -697,12 +825,12 @@ impl SignerDb { serde_json::to_string(&block_info).expect("Unable to serialize block info"); let hash = &block_info.signer_signature_hash(); let block_id = &block_info.block.block_id(); - let signed_over = &block_info.signed_over; + let signed_over = block_info.signed_over; let vote = block_info .vote .as_ref() .map(|v| if v.rejected { "REJECT" } else { "ACCEPT" }); - let broadcasted = self.get_block_broadcasted(block_info.reward_cycle, hash)?; + let broadcasted = self.get_block_broadcasted(hash)?; debug!("Inserting block_info."; "reward_cycle" => %block_info.reward_cycle, "burn_block_height" => %block_info.burn_block_height, @@ -712,24 +840,28 @@ impl SignerDb { "broadcasted" => ?broadcasted, "vote" => vote ); - self.db - .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", - params![ - u64_to_sql(block_info.reward_cycle)?, u64_to_sql(block_info.burn_block_height)?, hash.to_string(), block_json, - signed_over, - &broadcasted, - u64_to_sql(block_info.block.header.chain_length)?, - block_info.block.header.consensus_hash.to_hex(), - ], - )?; - + self.db.execute("INSERT OR REPLACE INTO blocks (reward_cycle, burn_block_height, signer_signature_hash, block_info, signed_over, broadcasted, stacks_height, consensus_hash, valid, state, signed_group, signed_self, proposed_time, validation_time_ms, tenure_change) VALUES (?1, ?2, ?3, ?4, 
?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", params![ + u64_to_sql(block_info.reward_cycle)?, + u64_to_sql(block_info.burn_block_height)?, + hash.to_string(), + block_json, + &block_info.signed_over, + &broadcasted, + u64_to_sql(block_info.block.header.chain_length)?, + block_info.block.header.consensus_hash.to_hex(), + &block_info.valid, &block_info.state.to_string(), + &block_info.signed_group, + &block_info.signed_self, + &block_info.proposed_time, + &block_info.validation_time_ms, + &block_info.is_tenure_change() + ])?; Ok(()) } /// Determine if there are any unprocessed blocks pub fn has_unprocessed_blocks(&self, reward_cycle: u64) -> Result { - let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND json_extract(block_info, '$.state') = ?2 LIMIT 1"; + let query = "SELECT block_info FROM blocks WHERE reward_cycle = ?1 AND state = ?2 LIMIT 1"; let result: Option = query_row( &self.db, query, @@ -807,15 +939,13 @@ impl SignerDb { /// Mark a block as having been broadcasted and therefore GloballyAccepted pub fn set_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ts: u64, ) -> Result<(), DBError> { - let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2) WHERE reward_cycle = ?3 AND signer_signature_hash = ?4"; + let qry = "UPDATE blocks SET broadcasted = ?1, block_info = json_set(block_info, '$.state', ?2), state = ?2 WHERE signer_signature_hash = ?3"; let args = params![ u64_to_sql(ts)?, BlockState::GloballyAccepted.to_string(), - u64_to_sql(reward_cycle)?, block_sighash ]; @@ -827,12 +957,11 @@ impl SignerDb { /// Get the timestamp at which the block was broadcasted. 
pub fn get_block_broadcasted( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { let qry = - "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2"; - let args = params![u64_to_sql(reward_cycle)?, block_sighash]; + "SELECT IFNULL(broadcasted,0) AS broadcasted FROM blocks WHERE signer_signature_hash = ?"; + let args = params![block_sighash]; let Some(broadcasted): Option = query_row(&self.db, qry, args)? else { return Ok(None); @@ -846,11 +975,10 @@ impl SignerDb { /// Get the current state of a given block in the database pub fn get_block_state( &self, - reward_cycle: u64, block_sighash: &Sha512Trunc256Sum, ) -> Result, DBError> { - let qry = "SELECT json_extract(block_info, '$.state') FROM blocks WHERE reward_cycle = ?1 AND signer_signature_hash = ?2 LIMIT 1"; - let args = params![&u64_to_sql(reward_cycle)?, block_sighash]; + let qry = "SELECT state FROM blocks WHERE signer_signature_hash = ?1 LIMIT 1"; + let args = params![block_sighash]; let state_opt: Option = query_row(&self.db, qry, args)?; let Some(state) = state_opt else { return Ok(None); @@ -859,6 +987,69 @@ impl SignerDb { BlockState::try_from(state.as_str()).map_err(|_| DBError::Corruption)?, )) } + + /// Return the start time (epoch time in seconds) and the processing time in milliseconds of the tenure (identified by consensus_hash). 
+ fn get_tenure_times(&self, tenure: &ConsensusHash) -> Result<(u64, u64), DBError> { + let query = "SELECT tenure_change, proposed_time, validation_time_ms FROM blocks WHERE consensus_hash = ?1 AND state = ?2 ORDER BY stacks_height DESC"; + let args = params![tenure, BlockState::GloballyAccepted.to_string()]; + let mut stmt = self.db.prepare(query)?; + let rows = stmt.query_map(args, |row| { + let tenure_change_block: bool = row.get(0)?; + let proposed_time: u64 = row.get(1)?; + let validation_time_ms: Option = row.get(2)?; + Ok((tenure_change_block, proposed_time, validation_time_ms)) + })?; + let mut tenure_processing_time_ms = 0_u64; + let mut tenure_start_time = None; + let mut nmb_rows = 0; + for (i, row) in rows.enumerate() { + nmb_rows += 1; + let (tenure_change_block, proposed_time, validation_time_ms) = row?; + tenure_processing_time_ms = + tenure_processing_time_ms.saturating_add(validation_time_ms.unwrap_or(0)); + tenure_start_time = Some(proposed_time); + if tenure_change_block { + debug!("Found tenure change block {i} blocks ago in tenure {tenure}"); + break; + } + } + debug!("Calculated tenure extend timestamp from {nmb_rows} blocks in tenure {tenure}"); + Ok(( + tenure_start_time.unwrap_or(get_epoch_time_secs()), + tenure_processing_time_ms, + )) + } + + /// Calculate the tenure extend timestamp. If determining the timestamp for a block rejection, check_tenure_extend should be set to false to avoid recalculating + /// the tenure extend timestamp for a tenure extend block. + pub fn calculate_tenure_extend_timestamp( + &self, + tenure_idle_timeout: Duration, + block: &NakamotoBlock, + check_tenure_extend: bool, + ) -> u64 { + if check_tenure_extend && block.get_tenure_tx_payload().is_some() { + let tenure_extend_timestamp = + get_epoch_time_secs().wrapping_add(tenure_idle_timeout.as_secs()); + debug!("Calculated tenure extend timestamp for a tenure extend block. 
Rolling over timestamp: {tenure_extend_timestamp}"); + return tenure_extend_timestamp; + } + let tenure_idle_timeout_secs = tenure_idle_timeout.as_secs(); + let (tenure_start_time, tenure_process_time_ms) = self.get_tenure_times(&block.header.consensus_hash).inspect_err(|e| error!("Error occurred calculating tenure extend timestamp: {e:?}. Defaulting to {tenure_idle_timeout_secs} from now.")).unwrap_or((get_epoch_time_secs(), 0)); + // Plus (ms + 999)/1000 to round up to the nearest second + let tenure_extend_timestamp = tenure_start_time + .saturating_add(tenure_idle_timeout_secs) + .saturating_add(tenure_process_time_ms.saturating_add(999) / 1000); + debug!("Calculated tenure extend timestamp"; + "tenure_extend_timestamp" => tenure_extend_timestamp, + "tenure_start_time" => tenure_start_time, + "tenure_process_time_ms" => tenure_process_time_ms, + "tenure_idle_timeout_secs" => tenure_idle_timeout_secs, + "tenure_extend_in" => tenure_extend_timestamp.saturating_sub(get_epoch_time_secs()), + "consensus_hash" => %block.header.consensus_hash, + ); + tenure_extend_timestamp + } } fn try_deserialize(s: Option) -> Result, DBError> @@ -877,6 +1068,12 @@ mod tests { use std::path::PathBuf; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; + use blockstack_lib::chainstate::stacks::{ + StacksTransaction, TenureChangeCause, TenureChangePayload, TransactionAuth, + TransactionVersion, + }; + use clarity::types::chainstate::{StacksBlockId, StacksPrivateKey, StacksPublicKey}; + use clarity::util::hash::Hash160; use clarity::util::secp256k1::MessageSignature; use libsigner::BlockProposal; @@ -919,39 +1116,42 @@ mod tests { fn test_basic_signer_db_with_path(db_path: impl AsRef) { let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; - db.insert_block(&block_info) + let (block_info_1, block_proposal_1) = create_block_override(|b| { + 
b.block.header.consensus_hash = ConsensusHash([0x01; 20]); + }); + let (block_info_2, block_proposal_2) = create_block_override(|b| { + b.block.header.consensus_hash = ConsensusHash([0x02; 20]); + }); + db.insert_block(&block_info_1) .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); - assert_eq!(BlockInfo::from(block_proposal.clone()), block_info); + assert_eq!(BlockInfo::from(block_proposal_1.clone()), block_info); - // Test looking up a block from a different reward cycle + // Test looking up a block with an unknown hash let block_info = db - .block_lookup( - reward_cycle + 1, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) .unwrap(); assert!(block_info.is_none()); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + let block_info = db + .block_lookup(&block_proposal_2.block.header.signer_signature_hash()) + .unwrap() + .expect("Unable to get block from db"); + + assert_eq!(BlockInfo::from(block_proposal_2.clone()), block_info); // test getting the block state let block_state = db - .get_block_state( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .get_block_state(&block_proposal_1.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block state from db"); - assert_eq!(block_state, BlockInfo::from(block_proposal.clone()).state); + assert_eq!(block_state, BlockInfo::from(block_proposal_1.clone()).state); } #[test] @@ -971,15 +1171,11 @@ mod tests { let db_path = tmp_db_path(); let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); let (block_info, block_proposal) = create_block(); - let reward_cycle = block_info.reward_cycle; db.insert_block(&block_info) 
.expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -1005,10 +1201,7 @@ mod tests { .expect("Unable to insert block into db"); let block_info = db - .block_lookup( - reward_cycle, - &block_proposal.block.header.signer_signature_hash(), - ) + .block_lookup(&block_proposal.block.header.signer_signature_hash()) .unwrap() .expect("Unable to get block from db"); @@ -1180,51 +1373,47 @@ mod tests { .expect("Unable to insert block into db"); assert!(db - .get_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) + .get_block_broadcasted(&block_info_1.signer_signature_hash()) .unwrap() .is_none()); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, BlockState::Unprocessed ); - db.set_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash(), - 12345, - ) - .unwrap(); + assert!(db + .get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) + .unwrap() + .is_none()); + db.set_block_broadcasted(&block_info_1.signer_signature_hash(), 12345) + .unwrap(); assert_eq!( - db.block_lookup( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .expect("Unable to get block from db") - .expect("Unable to get block from db") - .state, + db.block_lookup(&block_info_1.signer_signature_hash()) + .expect("Unable to get block from db") + .expect("Unable to get block from db") + .state, BlockState::GloballyAccepted ); + assert_eq!( + 
db.get_last_globally_accepted_block(&block_info_1.block.header.consensus_hash) + .unwrap() + .unwrap() + .signer_signature_hash(), + block_info_1.block.header.signer_signature_hash() + ); db.insert_block(&block_info_1) .expect("Unable to insert block into db a second time"); assert_eq!( - db.get_block_broadcasted( - block_info_1.reward_cycle, - &block_info_1.signer_signature_hash() - ) - .unwrap() - .unwrap(), + db.get_block_broadcasted(&block_info_1.signer_signature_hash()) + .unwrap() + .unwrap(), 12345 ); } + #[test] fn state_machine() { let (mut block, _) = create_block(); @@ -1307,4 +1496,284 @@ mod tests { assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_2); } + + #[test] + fn get_accepted_blocks() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let consensus_hash_3 = ConsensusHash([0x03; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 3; + }); + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 4; + }); + 
block_info_1.mark_globally_accepted().unwrap(); + block_info_2.mark_locally_accepted(false).unwrap(); + block_info_3.mark_locally_accepted(false).unwrap(); + block_info_4.mark_globally_accepted().unwrap(); + + db.insert_block(&block_info_1).unwrap(); + db.insert_block(&block_info_2).unwrap(); + db.insert_block(&block_info_3).unwrap(); + db.insert_block(&block_info_4).unwrap(); + + // Verify tenure consensus_hash_1 + let block_info = db + .get_last_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_3); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_1) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_1); + + // Verify tenure consensus_hash_2 + let block_info = db + .get_last_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + let block_info = db + .get_last_globally_accepted_block(&consensus_hash_2) + .unwrap() + .unwrap(); + assert_eq!(block_info, block_info_4); + + // Verify tenure consensus_hash_3 + assert!(db + .get_last_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + assert!(db + .get_last_globally_accepted_block(&consensus_hash_3) + .unwrap() + .is_none()); + } + + fn generate_tenure_blocks() -> Vec { + let tenure_change_payload = TenureChangePayload { + tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header + prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), + burn_view_consensus_hash: ConsensusHash([0x04; 20]), + previous_tenure_end: StacksBlockId([0x03; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private( + &StacksPrivateKey::new(), + )), + }; + let tenure_change_tx_payload = + TransactionPayload::TenureChange(tenure_change_payload.clone()); + let tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&StacksPrivateKey::new()).unwrap(), + 
tenure_change_tx_payload.clone(), + ); + + let consensus_hash_1 = ConsensusHash([0x01; 20]); + let consensus_hash_2 = ConsensusHash([0x02; 20]); + let (mut block_info_1, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + block_info_1.state = BlockState::GloballyAccepted; + block_info_1.block.txs.push(tenure_change_tx.clone()); + block_info_1.validation_time_ms = Some(1000); + block_info_1.proposed_time = get_epoch_time_secs() + 500; + + let (mut block_info_2, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + block_info_2.state = BlockState::GloballyAccepted; + block_info_2.validation_time_ms = Some(2000); + block_info_2.proposed_time = block_info_1.proposed_time + 5; + + let (mut block_info_3, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x03; 65]); + b.block.header.chain_length = 3; + b.burn_height = 2; + }); + block_info_3.state = BlockState::GloballyAccepted; + block_info_3.block.txs.push(tenure_change_tx); + block_info_3.validation_time_ms = Some(5000); + block_info_3.proposed_time = block_info_1.proposed_time + 10; + + // This should have no effect on the time calculations as its not a globally accepted block + let (mut block_info_4, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_1; + b.block.header.miner_signature = MessageSignature([0x04; 65]); + b.block.header.chain_length = 3; + b.burn_height = 2; + }); + block_info_4.state = BlockState::LocallyAccepted; + block_info_4.validation_time_ms = Some(9000); + block_info_4.proposed_time = 
block_info_1.proposed_time + 15; + + let (mut block_info_5, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x05; 65]); + b.block.header.chain_length = 4; + b.burn_height = 3; + }); + block_info_5.state = BlockState::GloballyAccepted; + block_info_5.validation_time_ms = Some(20000); + block_info_5.proposed_time = block_info_1.proposed_time + 20; + + // This should have no effect on the time calculations as its not a globally accepted block + let (mut block_info_6, _block_proposal) = create_block_override(|b| { + b.block.header.consensus_hash = consensus_hash_2; + b.block.header.miner_signature = MessageSignature([0x06; 65]); + b.block.header.chain_length = 5; + b.burn_height = 3; + }); + block_info_6.state = BlockState::LocallyAccepted; + block_info_6.validation_time_ms = Some(40000); + block_info_6.proposed_time = block_info_1.proposed_time + 25; + + vec![ + block_info_1, + block_info_2, + block_info_3, + block_info_4, + block_info_5, + block_info_6, + ] + } + + #[test] + fn tenure_times() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + let block_infos = generate_tenure_blocks(); + let consensus_hash_1 = block_infos[0].block.header.consensus_hash; + let consensus_hash_2 = block_infos.last().unwrap().block.header.consensus_hash; + let consensus_hash_3 = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); + + // Verify tenure consensus_hash_1 + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, block_infos[0].proposed_time); + assert_eq!(processing_time, 3000); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_1).unwrap(); + assert_eq!(start_time, 
block_infos[2].proposed_time); + assert_eq!(processing_time, 5000); + + db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); + + // Verify tenure consensus_hash_2 + let (start_time, processing_time) = db.get_tenure_times(&consensus_hash_2).unwrap(); + assert_eq!(start_time, block_infos[4].proposed_time); + assert_eq!(processing_time, 20000); + + // Verify tenure consensus_hash_3 (unknown hash) + let (start_time, validation_time) = db.get_tenure_times(&consensus_hash_3).unwrap(); + assert!(start_time < block_infos[0].proposed_time, "Should have been generated from get_epoch_time_secs() making it much older than our artificially late proposal times"); + assert_eq!(validation_time, 0); + } + + #[test] + fn tenure_extend_timestamp() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let block_infos = generate_tenure_blocks(); + let mut unknown_block = block_infos[0].block.clone(); + unknown_block.header.consensus_hash = ConsensusHash([0x03; 20]); + + db.insert_block(&block_infos[0]).unwrap(); + db.insert_block(&block_infos[1]).unwrap(); + + let tenure_idle_timeout = Duration::from_secs(10); + // Verify tenure consensus_hash_1 + let timestamp_hash_1_before = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); + assert_eq!( + timestamp_hash_1_before, + block_infos[0] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(3) + ); + + db.insert_block(&block_infos[2]).unwrap(); + db.insert_block(&block_infos[3]).unwrap(); + + let timestamp_hash_1_after = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, true); + + assert_eq!( + timestamp_hash_1_after, + block_infos[2] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(5) + ); + + db.insert_block(&block_infos[4]).unwrap(); + db.insert_block(&block_infos[5]).unwrap(); + + // Verify tenure 
consensus_hash_2 + let timestamp_hash_2 = db.calculate_tenure_extend_timestamp( + tenure_idle_timeout, + &block_infos.last().unwrap().block, + true, + ); + assert_eq!( + timestamp_hash_2, + block_infos[4] + .proposed_time + .saturating_add(tenure_idle_timeout.as_secs()) + .saturating_add(20) + ); + + let now = get_epoch_time_secs().saturating_add(tenure_idle_timeout.as_secs()); + let timestamp_hash_2_no_tenure_extend = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &block_infos[0].block, false); + assert_ne!(timestamp_hash_2, timestamp_hash_2_no_tenure_extend); + assert!(now < timestamp_hash_2_no_tenure_extend); + + // Verify tenure consensus_hash_3 (unknown hash) + let timestamp_hash_3 = + db.calculate_tenure_extend_timestamp(tenure_idle_timeout, &unknown_block, true); + assert!( + timestamp_hash_3.saturating_add(tenure_idle_timeout.as_secs()) + < block_infos[0].proposed_time + ); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index bec9f1258d..2037a25def 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -90,6 +90,7 @@ fn setup_test_environment( first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }, }; @@ -132,13 +133,13 @@ fn check_proposal_units() { setup_test_environment("check_proposal_units"); assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.last_sortition = None; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -154,7 +155,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, 
false, ) .unwrap()); @@ -166,7 +166,6 @@ fn check_proposal_miner_pkh_mismatch() { &mut signer_db, &block, &different_block_pk, - 1, false, ) .unwrap()); @@ -263,7 +262,7 @@ fn reorg_timing_testing( config, } = MockServerClient::new(); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, false) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, false) }); header_clone.chain_length -= 1; let response = crate::client::tests::build_get_tenure_tip_response( @@ -300,16 +299,16 @@ fn check_proposal_invalid_status() { setup_test_environment("invalid_status"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedAfterFirstBlock; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); block.header.consensus_hash = view.last_sortition.as_ref().unwrap().consensus_hash; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); view.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; @@ -320,7 +319,7 @@ fn check_proposal_invalid_status() { // parent blocks have been seen before, while the signer state checks are only reasoning about // stacks blocks seen by the signer, which may be a subset) assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -369,7 +368,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = 
make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(!view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let mut extend_payload = make_tenure_change_payload(); @@ -379,7 +378,7 @@ fn check_proposal_tenure_extend_invalid_conditions() { let tx = make_tenure_change_tx(extend_payload); block.txs = vec![tx]; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); } @@ -406,7 +405,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -417,7 +415,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -430,7 +427,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &curr_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -441,7 +437,6 @@ fn check_block_proposal_timeout() { &mut signer_db, &last_sortition_block, &block_pk, - 1, false, ) .unwrap()); @@ -533,7 +528,7 @@ fn check_proposal_refresh() { setup_test_environment("check_proposal_refresh"); block.header.consensus_hash = view.cur_sortition.consensus_hash; assert!(view - .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, 1, false) + .check_proposal(&stacks_client, &mut signer_db, &block, &block_pk, false) .unwrap()); let MockServerClient { @@ -575,7 +570,7 @@ fn check_proposal_refresh() { view.cur_sortition.consensus_hash = ConsensusHash([128; 20]); let h = std::thread::spawn(move || { - view.check_proposal(&client, &mut signer_db, &block, &block_pk, 1, true) + view.check_proposal(&client, &mut signer_db, &block, &block_pk, true) }); crate::client::tests::write_response( server, diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 19002c1914..26a9380dcb 
100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -1,6 +1,6 @@ stacks_private_key = "6a1fc1a3183018c6d79a4e11e154d2bdad2d89ac8bc1b0a021de8b4d28774fbb01" node_host = "127.0.0.1:20443" -endpoint = "localhost:30000" +endpoint = "[::1]:30000" network = "testnet" auth_password = "12345" db_path = ":memory:" diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index b537cfae8a..22a02bc107 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -292,7 +292,7 @@ impl Signer { /// Determine this signers response to a proposed block /// Returns a BlockResponse if we have already validated the block /// Returns None otherwise - fn determine_response(&self, block_info: &BlockInfo) -> Option { + fn determine_response(&mut self, block_info: &BlockInfo) -> Option { let valid = block_info.valid?; let response = if valid { debug!("{self}: Accepting block {}", block_info.block.block_id()); @@ -300,7 +300,15 @@ impl Signer { .private_key .sign(block_info.signer_signature_hash().bits()) .expect("Failed to sign block"); - BlockResponse::accepted(block_info.signer_signature_hash(), signature) + BlockResponse::accepted( + block_info.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + true, + ), + ) } else { debug!("{self}: Rejecting block {}", block_info.block.block_id()); BlockResponse::rejected( @@ -308,6 +316,11 @@ impl Signer { RejectCode::RejectedInPriorRound, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + false, + ), ) }; Some(response) @@ -336,7 +349,7 @@ impl Signer { let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); if let Some(block_info) = self .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) + 
.block_lookup(&signer_signature_hash) .expect("Failed to connect to signer DB") { let Some(block_response) = self.determine_response(&block_info) else { @@ -395,7 +408,6 @@ impl Signer { &mut self.signer_db, &block_proposal.block, miner_pubkey, - self.reward_cycle, true, ) { // Error validating block @@ -410,6 +422,11 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) } // Block proposal is bad @@ -424,6 +441,11 @@ impl Signer { RejectCode::SortitionViewMismatch, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) } // Block proposal passed check, still don't know if valid @@ -440,6 +462,11 @@ impl Signer { RejectCode::NoSortitionView, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) }; @@ -535,10 +562,7 @@ impl Signer { self.submitted_block_proposal = None; } // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); @@ -563,6 +587,13 @@ impl Signer { } block_info.signed_self.get_or_insert(get_epoch_time_secs()); } + // Record the block validation time but do not consider stx transfers or boot contract calls + block_info.validation_time_ms = if block_validate_ok.cost.is_zero() { + Some(0) + } else { + Some(block_validate_ok.validation_time_ms) + }; + let signature = self .private_key .sign(&signer_signature_hash.0) @@ -571,7 +602,15 @@ impl Signer { self.signer_db .insert_block(&block_info) .unwrap_or_else(|e| self.handle_insert_block_error(e)); - let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); + let accepted = BlockAccepted::new( + block_info.signer_signature_hash(), + signature, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + true, + ), + ); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); Some(BlockResponse::Accepted(accepted)) @@ -594,10 +633,7 @@ impl Signer { { self.submitted_block_proposal = None; } - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { + let mut block_info = match self.signer_db.block_lookup(&signer_signature_hash) { Ok(Some(block_info)) => { if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. 
Ignoring...", block_info.state); @@ -625,6 +661,11 @@ impl Signer { block_validate_reject.clone(), &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_info.block, + false, + ), ); self.signer_db .insert_block(&block_info) @@ -683,10 +724,7 @@ impl Signer { } let signature_sighash = block_proposal.block.header.signer_signature_hash(); // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signature_sighash) - { + let mut block_info = match self.signer_db.block_lookup(&signature_sighash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -722,6 +760,11 @@ impl Signer { RejectCode::ConnectivityIssues, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), ); if let Err(e) = block_info.mark_locally_rejected() { warn!("{self}: Failed to mark block as locally rejected: {e:?}",); @@ -769,7 +812,7 @@ impl Signer { let block_hash = &rejection.signer_signature_hash; let signature = &rejection.signature; - let mut block_info = match self.signer_db.block_lookup(self.reward_cycle, block_hash) { + let mut block_info = match self.signer_db.block_lookup(block_hash) { Ok(Some(block_info)) => { if block_info.state == BlockState::GloballyRejected || block_info.state == BlockState::GloballyAccepted @@ -867,6 +910,7 @@ impl Signer { signer_signature_hash: block_hash, signature, metadata, + .. } = accepted; debug!( "{self}: Received a block-accept signature: ({block_hash}, {signature}, {})", @@ -874,10 +918,7 @@ impl Signer { ); // Have we already processed this block? 
- match self - .signer_db - .get_block_state(self.reward_cycle, block_hash) - { + match self.signer_db.get_block_state(block_hash) { Ok(Some(state)) => { if state == BlockState::GloballyAccepted || state == BlockState::GloballyRejected { debug!("{self}: Received block signature for a block that is already marked as {}. Ignoring...", state); @@ -959,14 +1000,9 @@ impl Signer { } // have enough signatures to broadcast! - let Ok(Some(mut block_info)) = self - .signer_db - .block_lookup(self.reward_cycle, block_hash) - .map_err(|e| { - warn!("{self}: Failed to load block {block_hash}: {e:?})"); - e - }) - else { + let Ok(Some(mut block_info)) = self.signer_db.block_lookup(block_hash).inspect_err(|e| { + warn!("{self}: Failed to load block {block_hash}: {e:?})"); + }) else { warn!("{self}: No such block {block_hash}"); return; }; @@ -1039,11 +1075,10 @@ impl Signer { ); stacks_client.post_block_until_ok(self, &block); - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } } @@ -1059,11 +1094,10 @@ impl Signer { "consensus_hash" => %block.header.consensus_hash ); - if let Err(e) = self.signer_db.set_block_broadcasted( - self.reward_cycle, - &block_hash, - get_epoch_time_secs(), - ) { + if let Err(e) = self + .signer_db + .set_block_broadcasted(&block_hash, get_epoch_time_secs()) + { warn!("{self}: Failed to set block broadcasted for {block_hash}: {e:?}"); } return true; @@ -1103,6 +1137,11 @@ impl Signer { RejectCode::TestingDirective, &self.private_key, self.mainnet, + self.signer_db.calculate_tenure_extend_timestamp( + self.proposal_config.tenure_idle_timeout, + &block_proposal.block, + false, + ), )) } else { None diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 
74ecd19bc1..68cdb2454a 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -649,7 +649,7 @@ impl NakamotoBlockBuilder { "parent_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_consumed" => %consumed, - "%-full" => block_limit.proportion_largest_dimension(&consumed), + "percent_full" => block_limit.proportion_largest_dimension(&consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index dbaf226015..35f6e5d1e1 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2465,7 +2465,7 @@ impl NakamotoChainState { db_handle: &mut SortitionHandleConn, staging_db_tx: &NakamotoStagingBlocksTx, headers_conn: &Connection, - reward_set: RewardSet, + reward_set: &RewardSet, obtain_method: NakamotoBlockObtainMethod, ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); @@ -2535,7 +2535,7 @@ impl NakamotoChainState { return Ok(false); }; - let signing_weight = match block.header.verify_signer_signatures(&reward_set) { + let signing_weight = match block.header.verify_signer_signatures(reward_set) { Ok(x) => x, Err(e) => { warn!("Received block, but the signer signatures are invalid"; @@ -3896,7 +3896,7 @@ impl NakamotoChainState { // Nakamoto must load block cost from parent if this block isn't a tenure change. // If this is a tenure-extend, then the execution cost is reset. let initial_cost = if new_tenure || tenure_extend { - ExecutionCost::zero() + ExecutionCost::ZERO } else { let parent_cost_total = Self::get_total_tenure_cost_at(chainstate_tx.as_tx(), &parent_index_hash)? 
@@ -4223,7 +4223,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal @@ -4713,7 +4713,7 @@ impl NakamotoChainState { tx_receipts, matured_rewards, matured_rewards_info: matured_rewards_info_opt, - parent_microblocks_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, anchored_block_cost: block_execution_cost, parent_burn_block_hash, parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 7ae25d00f6..64782c67d6 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1039,7 +1039,7 @@ fn test_simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); @@ -1385,7 +1385,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == charlie_address { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" 
); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index dc65db0324..8a173c6adc 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -459,7 +459,7 @@ fn simple_pox_lockup_transition_pox_2() { bob_txs.insert(t.auth.get_origin_nonce(), r); } else if addr == key_to_stacks_addr(&charlie) { assert!( - r.execution_cost != ExecutionCost::zero(), + r.execution_cost != ExecutionCost::ZERO, "Execution cost is not zero!" ); charlie_txs.insert(t.auth.get_origin_nonce(), r); diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index b05365d5ac..bf84cc1362 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -1192,7 +1192,7 @@ mod test { new_tip.microblock_tail.clone(), &block_reward, None, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 123, false, vec![], diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 791f2064dc..233a9d5978 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4304,7 +4304,7 @@ impl StacksChainState { post_condition_aborted: false, stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -5107,7 +5107,7 @@ impl StacksChainState { ); cost } else { - ExecutionCost::zero() + ExecutionCost::ZERO }; let mut clarity_tx = StacksChainState::chainstate_block_begin( @@ -5194,7 +5194,7 @@ impl StacksChainState { // if we get here, then we need to reset the block-cost back to 0 since this begins the // epoch defined by this miner. 
- clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); // is this stacks block the first of a new epoch? let (applied_epoch_transition, mut tx_receipts) = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 7fed3e9a46..42f72d5165 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1624,7 +1624,7 @@ impl StacksChainState { allocations_tx, allocation_events, Value::okay_true(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ); receipts.push(allocations_receipt); @@ -1725,7 +1725,7 @@ impl StacksChainState { &mut tx, &parent_hash, &first_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, 0, )?; tx.commit()?; diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index aef9627d15..3df99ea886 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -192,7 +192,7 @@ impl StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, @@ -307,7 +307,7 @@ impl StacksTransactionReceipt { result: Value::okay_true(), stx_burned: 0, contract_analysis: None, - execution_cost: ExecutionCost::zero(), + execution_cost: ExecutionCost::ZERO, microblock_header: None, tx_index: 0, vm_error: None, diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 7da2ff1599..6f7a9fe9ea 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -254,7 +254,7 @@ impl UnconfirmedState { let mut total_burns = 0; let mut all_receipts = vec![]; let mut mined_txs = UnconfirmedTxMap::new(); - let mut new_cost = ExecutionCost::zero(); + 
let mut new_cost = ExecutionCost::ZERO; let mut new_bytes = 0; let mut num_new_mblocks = 0; let mut have_state = self.have_state; @@ -351,7 +351,7 @@ impl UnconfirmedState { // apply injected faults if self.disable_cost_check { warn!("Fault injection: disabling microblock miner's cost tracking"); - self.cost_so_far = ExecutionCost::zero(); + self.cost_so_far = ExecutionCost::ZERO; } if self.disable_bytes_check { warn!("Fault injection: disabling microblock miner's size tracking"); @@ -709,7 +709,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -946,7 +946,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, @@ -1205,7 +1205,7 @@ mod test { } let mut anchor_size = 0; - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let (burn_ops, stacks_block, _) = peer.make_tenure( |ref mut miner, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 7a72cc1652..082e9c374c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1190,7 +1190,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; @@ -1418,7 +1418,7 @@ impl<'a> StacksMicroblockBuilder<'a> { } if self.runtime.disable_cost_check { warn!("Fault injection: disabling miner limit on microblock runtime cost"); - clarity_tx.reset_cost(ExecutionCost::zero()); + clarity_tx.reset_cost(ExecutionCost::ZERO); } self.runtime.bytes_so_far = bytes_so_far; 
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 7b7720b996..90fc7f1705 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -5064,7 +5064,7 @@ fn paramaterized_mempool_walk_test( available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index c89679f414..a5497cea24 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -215,7 +215,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } @@ -1765,7 +1765,7 @@ impl<'a, 'b> ClarityTransactionConnection<'a, 'b> { pub fn cost_so_far(&self) -> ExecutionCost { match self.cost_track { Some(ref track) => track.get_total(), - None => ExecutionCost::zero(), + None => ExecutionCost::ZERO, } } diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 03447e9bf4..a209ef0677 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -151,7 +151,7 @@ pub fn make_block( &mut chainstate_tx, &new_index_hash, &new_tip_info, - &ExecutionCost::zero(), + &ExecutionCost::ZERO, block_height, ) .unwrap(); @@ -288,7 +288,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -326,7 +326,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -363,7 +363,7 @@ fn 
mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -405,7 +405,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -445,7 +445,7 @@ fn mempool_walk_over_fork() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -683,7 +683,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -720,7 +720,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -757,7 +757,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -866,7 +866,7 @@ fn test_iterate_candidates_skipped_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -981,7 +981,7 @@ fn test_iterate_candidates_processing_error_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1096,7 +1096,7 @@ fn test_iterate_candidates_problematic_transaction() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event() @@ -1248,7 +1248,7 @@ fn 
test_iterate_candidates_concurrent_write_lock() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2792,7 +2792,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), @@ -2827,7 +2827,7 @@ fn test_filter_txs_by_type() { available_tx.tx.tx.clone(), vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ), ) .convert_to_event(), diff --git a/stackslib/src/cost_estimates/tests/common.rs b/stackslib/src/cost_estimates/tests/common.rs index 01f6c32ec7..4ce9ea48cc 100644 --- a/stackslib/src/cost_estimates/tests/common.rs +++ b/stackslib/src/cost_estimates/tests/common.rs @@ -44,8 +44,8 @@ pub fn make_block_receipt(tx_receipts: Vec) -> StacksE tx_receipts, matured_rewards: vec![], matured_rewards_info: None, - parent_microblocks_cost: ExecutionCost::zero(), - anchored_block_cost: ExecutionCost::zero(), + parent_microblocks_cost: ExecutionCost::ZERO, + anchored_block_cost: ExecutionCost::ZERO, parent_burn_block_hash: BurnchainHeaderHash([0; 32]), parent_burn_block_height: 1, parent_burn_block_timestamp: 1, diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index eabbb4a148..1ed6b034e5 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -102,7 +102,7 @@ fn make_dummy_transfer_tx() -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index 685fc6430a..3bfc4b966a 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ 
b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -94,7 +94,7 @@ fn make_dummy_transfer_tx(fee: u64) -> StacksTransactionReceipt { tx, vec![], Value::okay(Value::Bool(true)).unwrap(), - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } @@ -115,7 +115,7 @@ fn make_dummy_cc_tx(fee: u64) -> StacksTransactionReceipt { vec![], Value::okay(Value::Bool(true)).unwrap(), 0, - ExecutionCost::zero(), + ExecutionCost::ZERO, ) } diff --git a/stackslib/src/net/api/getstxtransfercost.rs b/stackslib/src/net/api/getstxtransfercost.rs index b8801e7d7c..78e6e66851 100644 --- a/stackslib/src/net/api/getstxtransfercost.rs +++ b/stackslib/src/net/api/getstxtransfercost.rs @@ -108,7 +108,7 @@ impl RPCRequestHandler for RPCGetStxTransferCostRequestHandler { if let Some((_, fee_estimator, metric)) = rpc_args.get_estimators_ref() { // STX transfer transactions have zero runtime cost - let estimated_cost = ExecutionCost::zero(); + let estimated_cost = ExecutionCost::ZERO; let estimations = RPCPostFeeRateRequestHandler::estimate_tx_fee_from_cost_and_length( &preamble, diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 2f35f29a18..c832695103 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -16,6 +16,9 @@ use std::io::{Read, Write}; use std::thread::{self, JoinHandle, Thread}; +#[cfg(any(test, feature = "testing"))] +use std::time::Duration; +use std::time::Instant; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; @@ -65,6 +68,10 @@ use crate::util_lib::db::Error as DBError; #[cfg(any(test, feature = "testing"))] pub static TEST_VALIDATE_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); +#[cfg(any(test, feature = "testing"))] +/// Artificial delay to add to block validation. 
+pub static TEST_VALIDATE_DELAY_DURATION_SECS: std::sync::Mutex> = + std::sync::Mutex::new(None); // This enum is used to supply a `reason_code` for validation // rejection responses. This is serialized as an enum with string @@ -145,6 +152,7 @@ pub struct BlockValidateOk { pub signer_signature_hash: Sha512Trunc256Sum, pub cost: ExecutionCost, pub size: u64, + pub validation_time_ms: u64, } /// This enum is used for serializing the response to block @@ -354,9 +362,15 @@ impl NakamotoBlockProposal { info!("Block validation is no longer stalled due to testing directive."); } } - let ts_start = get_epoch_time_ms(); - // Measure time from start of function - let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); + let start = Instant::now(); + + #[cfg(any(test, feature = "testing"))] + { + if let Some(delay) = *TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap() { + warn!("Sleeping for {} seconds to simulate slow processing", delay); + thread::sleep(Duration::from_secs(delay)); + } + } let mainnet = self.chain_id == CHAIN_ID_MAINNET; if self.chain_id != chainstate.chain_id || mainnet != chainstate.mainnet { @@ -572,6 +586,8 @@ impl NakamotoBlockProposal { }); } + let validation_time_ms = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); + info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, @@ -580,7 +596,7 @@ impl NakamotoBlockProposal { "parent_stacks_block_id" => %block.header.parent_block_id, "block_size" => size, "execution_cost" => %cost, - "validation_time_ms" => time_elapsed(), + "validation_time_ms" => validation_time_ms, "tx_fees_microstacks" => block.txs.iter().fold(0, |agg: u64, tx| { agg.saturating_add(tx.get_tx_fee()) }) @@ -590,6 +606,7 @@ impl NakamotoBlockProposal { signer_signature_hash: block.header.signer_signature_hash(), cost, size, + validation_time_ms, }) } } diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index cd8a337acb..c6c62dd1fe 
100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -431,7 +431,7 @@ impl<'a> TestRPC<'a> { let tip = SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) .unwrap(); - let mut anchor_cost = ExecutionCost::zero(); + let mut anchor_cost = ExecutionCost::ZERO; let mut anchor_size = 0; // make a block diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index c742bcf00b..481d0b2047 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -239,7 +239,7 @@ fn test_try_make_response() { let miner_privk = &rpc_test.peer_1.miner.nakamoto_miner_key(); - let mut block = { + let mut good_block = { let chainstate = rpc_test.peer_1.chainstate(); let parent_stacks_header = NakamotoChainState::get_block_header(chainstate.db(), &stacks_tip) @@ -315,12 +315,12 @@ fn test_try_make_response() { }; // Increment the timestamp by 1 to ensure it is different from the previous block - block.header.timestamp += 1; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + good_block.header.timestamp += 1; + rpc_test.peer_1.miner.sign_nakamoto_block(&mut good_block); // post the valid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: good_block.clone(), chain_id: 0x80000000, }; @@ -335,12 +335,16 @@ fn test_try_make_response() { requests.push(request); // Set the timestamp to a value in the past - block.header.timestamp -= 10000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut early_time_block = good_block.clone(); + early_time_block.header.timestamp -= 10000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut early_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: early_time_block, chain_id: 0x80000000, }; @@ -355,12 +359,16 @@ fn test_try_make_response() { 
requests.push(request); // Set the timestamp to a value in the future - block.header.timestamp += 20000; - rpc_test.peer_1.miner.sign_nakamoto_block(&mut block); + let mut late_time_block = good_block.clone(); + late_time_block.header.timestamp += 20000; + rpc_test + .peer_1 + .miner + .sign_nakamoto_block(&mut late_time_block); // post the invalid block proposal let proposal = NakamotoBlockProposal { - block: block.clone(), + block: late_time_block, chain_id: 0x80000000, }; @@ -383,7 +391,7 @@ fn test_try_make_response() { let response = responses.remove(0); - // Wait for the results to be non-empty + // Wait for the results of all 3 requests loop { info!("Wait for results to be non-empty"); if proposal_observer @@ -405,7 +413,23 @@ fn test_try_make_response() { let mut results = observer.results.lock().unwrap(); let result = results.remove(0); - assert!(result.is_ok()); + match result { + Ok(postblock_proposal::BlockValidateOk { + signer_signature_hash, + cost, + size, + validation_time_ms, + }) => { + assert_eq!( + signer_signature_hash, + good_block.header.signer_signature_hash() + ); + assert_eq!(cost, ExecutionCost::ZERO); + assert_eq!(size, 180); + assert!(validation_time_ms > 0 && validation_time_ms < 60000); + } + _ => panic!("expected ok"), + } let result = results.remove(0); match result { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index cb7d310321..b93171916c 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1077,7 +1077,7 @@ impl Relayer { sort_handle, &staging_db_tx, headers_conn, - reward_set, + &reward_set, obtained_method, )?; staging_db_tx.commit()?; diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 8144cd8ec5..4d6eec8922 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1730,8 +1730,8 @@ mod test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let 
parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); @@ -1800,8 +1800,8 @@ mod test { let parent_burn_block_hash = BurnchainHeaderHash([0; 32]); let parent_burn_block_height = 0; let parent_burn_block_timestamp = 0; - let anchored_consumed = ExecutionCost::zero(); - let mblock_confirmed_consumed = ExecutionCost::zero(); + let anchored_consumed = ExecutionCost::ZERO; + let mblock_confirmed_consumed = ExecutionCost::ZERO; let pox_constants = PoxConstants::testnet_default(); let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let block_timestamp = Some(123456); diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 19af89a3bc..09f8c7285f 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -44,7 +44,8 @@ use crate::run_loop::RegisteredKey; pub mod miner; pub mod peer; pub mod relayer; -pub mod sign_coordinator; +pub mod signer_coordinator; +pub mod stackerdb_listener; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 745ae03fc9..6a4ea39b60 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -47,8 +47,8 @@ use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; use super::relayer::RelayerThread; -use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, 
EventDispatcher, Keychain}; +use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; use crate::neon_node; use crate::run_loop::nakamoto::Globals; @@ -139,6 +139,8 @@ pub struct BlockMinerThread { burnchain: Burnchain, /// Last block mined last_block_mined: Option, + /// Number of blocks mined since a tenure change/extend + mined_blocks: u64, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -176,6 +178,7 @@ impl BlockMinerThread { keychain: rt.keychain.clone(), burnchain: rt.burnchain.clone(), last_block_mined: None, + mined_blocks: 0, registered_key, burn_election_block, burn_block, @@ -292,172 +295,257 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)?; let mut last_block_rejected = false; + let reward_set = self.load_signer_set()?; + let Some(miner_privkey) = self.config.miner.mining_key else { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let sortdb = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) + .expect("FATAL: could not open sortition DB"); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + // Start the signer coordinator + let mut coordinator = SignerCoordinator::new( + self.event_dispatcher.stackerdb_channel.clone(), + self.globals.should_keep_running.clone(), + &reward_set, + &burn_tip, + &self.burnchain, + miner_privkey, + &self.config, + ) + .map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to initialize the signing coordinator. Cannot mine! 
{e:?}" + )) + })?; + // now, actually run this tenure loop { - #[cfg(test)] - if *TEST_MINE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Mining is stalled due to testing directive"); - while *TEST_MINE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - warn!("Mining is no longer stalled due to testing directive. Continuing..."); + if let Err(e) = self.miner_main_loop( + &mut coordinator, + &sortdb, + &mut stackerdbs, + &mut last_block_rejected, + &reward_set, + ) { + // Before stopping this miner, shutdown the coordinator thread. + coordinator.shutdown(); + return Err(e); } - let new_block = loop { - // If we're mock mining, we may not have processed the block that the - // actual tenure winner committed to yet. So, before attempting to - // mock mine, check if the parent is processed. - if self.config.get_node_config(false).mock_mining { - let burn_db_path = self.config.get_burn_db_file_path(); - let mut burn_db = SortitionDB::open( - &burn_db_path, - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - let burn_tip_changed = self.check_burn_tip_changed(&burn_db); - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - match burn_tip_changed - .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) - { - Ok(..) => {} - Err(NakamotoNodeError::ParentNotFound) => { - info!("Mock miner has not processed parent block yet, sleeping and trying again"); - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - continue; - } - Err(e) => { - warn!("Mock miner failed to load parent info: {e:?}"); - return Err(e); - } - } - } + } + } - match self.mine_block() { - Ok(x) => { - if !self.validate_timestamp(&x)? { - info!("Block mined too quickly. 
Will try again."; - "block_timestamp" => x.header.timestamp, - ); - continue; - } - break Some(x); - } - Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { - info!("Miner interrupted while mining, will try again"); - // sleep, and try again. if the miner was interrupted because the burnchain - // view changed, the next `mine_block()` invocation will error + /// The main loop for the miner thread. This is where the miner will mine + /// blocks and then attempt to sign and broadcast them. + fn miner_main_loop( + &mut self, + coordinator: &mut SignerCoordinator, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + last_block_rejected: &mut bool, + reward_set: &RewardSet, + ) -> Result<(), NakamotoNodeError> { + #[cfg(test)] + if *TEST_MINE_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Mining is stalled due to testing directive"); + while *TEST_MINE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Mining is no longer stalled due to testing directive. Continuing..."); + } + let new_block = loop { + // If we're mock mining, we may not have processed the block that the + // actual tenure winner committed to yet. So, before attempting to + // mock mine, check if the parent is processed. + if self.config.get_node_config(false).mock_mining { + let burn_db_path = self.config.get_burn_db_file_path(); + let mut burn_db = + SortitionDB::open(&burn_db_path, true, self.burnchain.pox_constants.clone()) + .expect("FATAL: could not open sortition DB"); + let burn_tip_changed = self.check_burn_tip_changed(&burn_db); + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + match burn_tip_changed + .and_then(|_| self.load_block_parent_info(&mut burn_db, &mut chain_state)) + { + Ok(..) 
=> {} + Err(NakamotoNodeError::ParentNotFound) => { + info!("Mock miner has not processed parent block yet, sleeping and trying again"); thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; } - Err(NakamotoNodeError::MiningFailure( - ChainstateError::NoTransactionsToMine, - )) => { - debug!("Miner did not find any transactions to mine"); - break None; - } Err(e) => { - warn!("Failed to mine block: {e:?}"); - - // try again, in case a new sortition is pending - self.globals - .raise_initiative(format!("MiningFailure: {e:?}")); - return Err(ChainstateError::MinerAborted.into()); + warn!("Mock miner failed to load parent info: {e:?}"); + return Err(e); } } - }; - - if let Some(mut new_block) = new_block { - Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self - .gather_signatures(&mut new_block, &mut stackerdbs) - { - Ok(x) => x, - Err(e) => match e { - NakamotoNodeError::StacksTipChanged => { - info!("Stacks tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - NakamotoNodeError::BurnchainTipChanged => { - info!("Burnchain tip changed while waiting for signatures"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - return Err(e); - } - _ => { - // Sleep for a bit to allow signers to catch up - let pause_ms = if last_block_rejected { - self.config.miner.subsequent_rejection_pause_ms - } else { - self.config.miner.first_rejection_pause_ms - }; - - error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); - thread::sleep(Duration::from_millis(pause_ms)); - last_block_rejected = true; - continue; - } - }, - }; - last_block_rejected = false; + } - new_block.header.signer_signature = signer_signature; - if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { - warn!("Error accepting own block: {e:?}. Will try mining again."); + match self.mine_block(coordinator) { + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } + Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { + info!("Miner interrupted while mining, will try again"); + // sleep, and try again. if the miner was interrupted because the burnchain + // view changed, the next `mine_block()` invocation will error + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); continue; - } else { - info!( - "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "stacks_block_hash" => %new_block.header.block_hash(), - "stacks_block_id" => %new_block.header.block_id(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); } + Err(NakamotoNodeError::MiningFailure(ChainstateError::NoTransactionsToMine)) => { + debug!("Miner did not find any transactions to mine"); + break None; + } + Err(e) => { + warn!("Failed to mine block: {e:?}"); - // update mined-block counters and mined-tenure counters - self.globals.counters.bump_naka_mined_blocks(); - if self.last_block_mined.is_some() { - // this is the first block of the tenure, bump tenure counter - self.globals.counters.bump_naka_mined_tenures(); + // try again, in case 
a new sortition is pending + self.globals + .raise_initiative(format!("MiningFailure: {e:?}")); + return Err(ChainstateError::MinerAborted.into()); } + } + }; - // wake up chains coordinator - Self::fault_injection_block_announce_stall(&new_block); - self.globals.coord().announce_new_stacks_block(); + if let Some(mut new_block) = new_block { + Self::fault_injection_block_broadcast_stall(&new_block); + let signer_signature = match self.propose_block( + coordinator, + &mut new_block, + sortdb, + stackerdbs, + ) { + Ok(x) => x, + Err(e) => match e { + NakamotoNodeError::StacksTipChanged => { + info!("Stacks tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + NakamotoNodeError::BurnchainTipChanged => { + info!("Burnchain tip changed while waiting for signatures"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + return Err(e); + } + _ => { + // Sleep for a bit to allow signers to catch up + let pause_ms = if *last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); + thread::sleep(Duration::from_millis(pause_ms)); + *last_block_rejected = true; + return Ok(()); + } + }, + }; + *last_block_rejected = false; - self.last_block_mined = Some(new_block); + new_block.header.signer_signature = signer_signature; + if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { + warn!("Error accepting own block: {e:?}. Will try mining again."); + return Ok(()); + } else { + info!( + "Miner: Block signed by signer set and broadcasted"; + "signer_sighash" => %new_block.header.signer_signature_hash(), + "stacks_block_hash" => %new_block.header.block_hash(), + "stacks_block_id" => %new_block.header.block_id(), + "block_height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash, + ); } - let Ok(sort_db) = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) else { - error!("Failed to open sortition DB. 
Will try mining again."); - continue; - }; + // update mined-block counters and mined-tenure counters + self.globals.counters.bump_naka_mined_blocks(); + if self.last_block_mined.is_some() { + // this is the first block of the tenure, bump tenure counter + self.globals.counters.bump_naka_mined_tenures(); + } - let wait_start = Instant::now(); - while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { - thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); - if self.check_burn_tip_changed(&sort_db).is_err() { - return Err(NakamotoNodeError::BurnchainTipChanged); - } + // wake up chains coordinator + Self::fault_injection_block_announce_stall(&new_block); + self.globals.coord().announce_new_stacks_block(); + + self.last_block_mined = Some(new_block); + self.mined_blocks += 1; + } + + let Ok(sort_db) = SortitionDB::open( + &self.config.get_burn_db_file_path(), + true, + self.burnchain.pox_constants.clone(), + ) else { + error!("Failed to open sortition DB. Will try mining again."); + return Ok(()); + }; + + let wait_start = Instant::now(); + while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { + thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); + if self.check_burn_tip_changed(&sort_db).is_err() { + return Err(NakamotoNodeError::BurnchainTipChanged); } } + + Ok(()) + } + + fn propose_block( + &self, + coordinator: &mut SignerCoordinator, + new_block: &mut NakamotoBlock, + sortdb: &SortitionDB, + stackerdbs: &mut StackerDBs, + ) -> Result, NakamotoNodeError> { + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually propose the block. + return Ok(Vec::new()); + } + + let mut chain_state = + neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { + NakamotoNodeError::SigningCoordinatorFailure(format!( + "Failed to open chainstate DB. Cannot mine! 
{e:?}" + )) + })?; + coordinator.propose_block( + new_block, + &self.burn_block, + &self.burnchain, + sortdb, + &mut chain_state, + stackerdbs, + &self.globals.counters, + &self.burn_election_block.consensus_hash, + ) } /// Load the signer set active for this miner's blocks. This is the @@ -525,68 +613,6 @@ impl BlockMinerThread { Ok(reward_set) } - /// Gather a list of signatures from the signers for the block - fn gather_signatures( - &mut self, - new_block: &mut NakamotoBlock, - stackerdbs: &mut StackerDBs, - ) -> Result<(RewardSet, Vec), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open sortition DB. Cannot mine! {e:?}" - )) - })?; - - let reward_set = self.load_signer_set()?; - - if self.config.get_node_config(false).mock_mining { - return Ok((reward_set, Vec::new())); - } - - let mut coordinator = SignCoordinator::new( - &reward_set, - miner_privkey, - &self.config, - self.globals.should_keep_running.clone(), - self.event_dispatcher.stackerdb_channel.clone(), - ) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to initialize the signing coordinator. Cannot mine! {e:?}" - )) - })?; - - let mut chain_state = - neon_node::open_chainstate_with_faults(&self.config).map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to open chainstate DB. Cannot mine! 
{e:?}" - )) - })?; - - let signature = coordinator.run_sign_v0( - new_block, - &self.burn_block, - &self.burnchain, - &sort_db, - &mut chain_state, - stackerdbs, - &self.globals.counters, - &self.burn_election_block.consensus_hash, - )?; - - Ok((reward_set, signature)) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -611,7 +637,7 @@ impl BlockMinerThread { sort_db: &SortitionDB, chain_state: &mut StacksChainState, block: &NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, ) -> Result<(), ChainstateError> { if Self::fault_injection_skip_block_broadcast() { warn!( @@ -668,9 +694,20 @@ impl BlockMinerThread { fn broadcast( &mut self, block: NakamotoBlock, - reward_set: RewardSet, + reward_set: &RewardSet, stackerdbs: &StackerDBs, ) -> Result<(), NakamotoNodeError> { + if self.config.get_node_config(false).mock_mining { + // If we're mock mining, we don't actually broadcast the block. 
+ return Ok(()); + } + + if self.config.miner.mining_key.is_none() { + return Err(NakamotoNodeError::MinerConfigurationFailed( + "No mining key configured, cannot mine", + )); + }; + let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) .expect("FATAL: could not open chainstate DB"); let sort_db = SortitionDB::open( @@ -680,12 +717,6 @@ impl BlockMinerThread { ) .expect("FATAL: could not open sortition DB"); - if self.config.miner.mining_key.is_none() { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - // push block via p2p block push self.broadcast_p2p(&sort_db, &mut chain_state, &block, reward_set) .map_err(NakamotoNodeError::AcceptFailure)?; @@ -705,7 +736,7 @@ impl BlockMinerThread { let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( miner_privkey, &sort_db, &self.burn_block, @@ -997,8 +1028,12 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. 
- fn mine_block(&mut self) -> Result { + fn mine_block( + &mut self, + coordinator: &mut SignerCoordinator, + ) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); + info!("Miner: Mining block"); let burn_db_path = self.config.get_burn_db_file_path(); let reward_set = self.load_signer_set()?; @@ -1041,6 +1076,7 @@ impl BlockMinerThread { &parent_block_info, vrf_proof, target_epoch_id, + coordinator, )?; parent_block_info.stacks_parent_header.microblock_tail = None; @@ -1122,24 +1158,45 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Create the tenure start info for the block we're going to build fn make_tenure_start_info( - &self, + &mut self, chainstate: &StacksChainState, parent_block_info: &ParentStacksBlockInfo, vrf_proof: VRFProof, target_epoch_id: StacksEpochId, + coordinator: &mut SignerCoordinator, ) -> Result { let current_miner_nonce = parent_block_info.coinbase_nonce; - let Some(parent_tenure_info) = &parent_block_info.parent_tenure else { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + let parent_tenure_info = match &parent_block_info.parent_tenure { + Some(info) => info.clone(), + None => { + // We may be able to extend the current tenure + if self.last_block_mined.is_none() { + debug!("Miner: No parent tenure and no last block mined"); + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + ParentTenureInfo { + parent_tenure_blocks: self.mined_blocks, + parent_tenure_consensus_hash: self.burn_election_block.consensus_hash, + } + } }; if self.last_block_mined.is_some() { - return Ok(NakamotoTenureInfo { - coinbase_tx: None, - tenure_change_tx: None, - }); + // Check if we can extend the current tenure + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); + if get_epoch_time_secs() <= tenure_extend_timestamp { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + info!("Miner: 
Time-based tenure extend"; + "current_timestamp" => get_epoch_time_secs(), + "tenure_extend_timestamp" => tenure_extend_timestamp, + ); + self.tenure_extend_reset(); } let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); @@ -1206,6 +1263,13 @@ impl BlockMinerThread { Ok(()) } } + + fn tenure_extend_reset(&mut self) { + self.reason = MinerReason::Extended { + burn_view_consensus_hash: self.burn_block.consensus_hash, + }; + self.mined_blocks = 0; + } } impl ParentStacksBlockInfo { diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs deleted file mode 100644 index 2b1efcbfc5..0000000000 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ /dev/null @@ -1,615 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::collections::BTreeMap; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Receiver; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use hashbrown::{HashMap, HashSet}; -use libsigner::v0::messages::{ - BlockAccepted, BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0, -}; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; -use stacks::burnchains::Burnchain; -use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; -use stacks::chainstate::stacks::db::StacksChainState; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::Error as ChainstateError; -use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::stackerdb::StackerDBs; -use stacks::types::PublicKey; -use stacks::util::hash::MerkleHashFunc; -use stacks::util::secp256k1::MessageSignature; -use stacks::util_lib::boot::boot_code_id; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; - -use super::Error as NakamotoNodeError; -use crate::event_dispatcher::StackerDBChannel; -use crate::neon::Counters; -use crate::Config; - -/// Fault injection flag to prevent the miner from seeing enough signer signatures. -/// Used to test that the signers will broadcast a block if it gets enough signatures -#[cfg(test)] -pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mutex::new(None); - -/// How long should the coordinator poll on the event receiver before -/// waking up to check timeouts? 
-static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); - -/// The `SignCoordinator` struct sole function is to serve as the coordinator for Nakamoto block signing. -/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. -pub struct SignCoordinator { - receiver: Option>, - message_key: StacksPrivateKey, - is_mainnet: bool, - miners_session: StackerDBSession, - signer_entries: HashMap, - weight_threshold: u32, - total_weight: u32, - keep_running: Arc, - pub next_signer_bitvec: BitVec<4000>, - stackerdb_channel: Arc>, -} - -impl Drop for SignCoordinator { - fn drop(&mut self) { - let stackerdb_channel = self - .stackerdb_channel - .lock() - .expect("FATAL: failed to lock stackerdb channel"); - stackerdb_channel.replace_receiver(self.receiver.take().expect( - "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", - )); - } -} - -impl SignCoordinator { - /// * `reward_set` - the active reward set data, used to construct the signer - /// set parameters. 
- /// * `aggregate_public_key` - the active aggregate key for this cycle - pub fn new( - reward_set: &RewardSet, - message_key: StacksPrivateKey, - config: &Config, - keep_running: Arc, - stackerdb_channel: Arc>, - ) -> Result { - let is_mainnet = config.is_mainnet(); - let Some(ref reward_set_signers) = reward_set.signers else { - error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {reward_set:?}"); - return Err(ChainstateError::NoRegisteredSigners(0)); - }; - - let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Failed to parse NakamotoSignerEntries: {e:?}" - )) - })?; - let rpc_socket = config - .node - .get_rpc_loopback() - .ok_or_else(|| ChainstateError::MinerAborted)?; - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); - - let next_signer_bitvec: BitVec<4000> = BitVec::zeros( - reward_set_signers - .clone() - .len() - .try_into() - .expect("FATAL: signer set length greater than u16"), - ) - .expect("FATAL: unable to construct initial bitvec for signer set"); - - debug!( - "Initializing miner/coordinator"; - "num_signers" => signer_entries.signer_pks.len(), - "signer_public_keys" => ?signer_entries.signer_pks, - ); - - let total_weight = reward_set.total_signing_weight().map_err(|e| { - warn!("Failed to calculate total weight for the reward set: {e:?}"); - ChainstateError::NoRegisteredSigners(0) - })?; - - let threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; - - let signer_public_keys = reward_set_signers - .iter() - .cloned() - .enumerate() - .map(|(idx, signer)| { - let Ok(slot_id) = u32::try_from(idx) else { - return Err(ChainstateError::InvalidStacksBlock( - "Signer index exceeds u32".into(), - )); - }; - Ok((slot_id, signer)) - }) - .collect::, ChainstateError>>()?; - #[cfg(test)] - { 
- // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. - use crate::tests::nakamoto_integrations::TEST_SIGNING; - if TEST_SIGNING.lock().unwrap().is_some() { - debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); - let (receiver, replaced_other) = stackerdb_channel - .lock() - .expect("FATAL: failed to lock StackerDB channel") - .register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); - } - let sign_coordinator = Self { - message_key, - receiver: Some(receiver), - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - stackerdb_channel, - }; - return Ok(sign_coordinator); - } - } - - let (receiver, replaced_other) = stackerdb_channel - .lock() - .expect("FATAL: failed to lock StackerDB channel") - .register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); - } - - Ok(Self { - receiver: Some(receiver), - message_key, - is_mainnet, - miners_session, - next_signer_bitvec, - signer_entries: signer_public_keys, - weight_threshold: threshold, - total_weight, - keep_running, - stackerdb_channel, - }) - } - - /// Send a message over the miners contract using a `StacksPrivateKey` - #[allow(clippy::too_many_arguments)] - pub fn send_miners_message( - miner_sk: &StacksPrivateKey, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) - .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? - else { - return Err("No slot for miner".into()); - }; - - let slot_id = slot_range - .start - .saturating_add(miner_slot_id.to_u8().into()); - if !slot_range.contains(&slot_id) { - return Err("Not enough slots for miner messages".into()); - } - // Get the LAST slot version number written to the DB. If not found, use 0. - // Add 1 to get the NEXT version number - // Note: we already check above for the slot's existence - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let slot_version = stackerdbs - .get_slot_version(&miners_contract_id, slot_id) - .map_err(|e| format!("Failed to read slot version: {e:?}"))? - .unwrap_or(0) - .saturating_add(1); - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); - chunk - .sign(miner_sk) - .map_err(|_| "Failed to sign StackerDB chunk")?; - - match miners_session.put_chunk(&chunk) { - Ok(ack) => { - if ack.accepted { - debug!("Wrote message to stackerdb: {ack:?}"); - Ok(()) - } else { - Err(format!("{ack:?}")) - } - } - Err(e) => Err(format!("{e:?}")), - } - } - - /// Do we ignore signer signatures? 
- #[cfg(test)] - fn fault_injection_ignore_signatures() -> bool { - if *TEST_IGNORE_SIGNERS.lock().unwrap() == Some(true) { - return true; - } - false - } - - #[cfg(not(test))] - fn fault_injection_ignore_signatures() -> bool { - false - } - - /// Check if the tenure needs to change - fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { - let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - - if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { - info!("SignCoordinator: Cancel signature aggregation; burnchain tip has changed"); - true - } else { - false - } - } - - /// Start gathering signatures for a Nakamoto block. - /// This function begins by sending a `BlockProposal` message - /// to the signers, and then waits for the signers to respond - /// with their signatures. It does so in two ways, concurrently: - /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. - /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. - // Mutants skip here: this function is covered via integration tests, - // which the mutation testing does not see. 
- #[cfg_attr(test, mutants::skip)] - #[allow(clippy::too_many_arguments)] - pub fn run_sign_v0( - &mut self, - block: &NakamotoBlock, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: &SortitionDB, - chain_state: &mut StacksChainState, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortition: &ConsensusHash, - ) -> Result, NakamotoNodeError> { - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - - let block_proposal = BlockProposal { - block: block.clone(), - burn_height: burn_tip.block_height, - reward_cycle: reward_cycle_id, - }; - - let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); - debug!("Sending block proposal message to signers"; - "signer_signature_hash" => %block.header.signer_signature_hash(), - ); - Self::send_miners_message::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - block_proposal_message, - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortition, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - - #[cfg(test)] - { - info!( - "SignCoordinator: sent block proposal to .miners, waiting for test signing channel" - ); - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
- if let Some(signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(signatures); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - let mut total_weight_signed: u32 = 0; - let mut total_reject_weight: u32 = 0; - let mut responded_signers = HashSet::new(); - let mut gathered_signatures = BTreeMap::new(); - - info!("SignCoordinator: beginning to watch for block signatures OR posted blocks."; - "threshold" => self.weight_threshold, - ); - - loop { - // look in the nakamoto staging db -- a block can only get stored there if it has - // enough signing weight to clear the threshold - if let Ok(Some((stored_block, _sz))) = chain_state - .nakamoto_blocks_db() - .get_nakamoto_block(&block.block_id()) - .map_err(|e| { - warn!( - "Failed to query chainstate for block {}: {e:?}", - &block.block_id() - ); - e - }) - { - debug!("SignCoordinator: Found signatures in relayed block"); - counters.bump_naka_signer_pushed_blocks(); - return Ok(stored_block.header.signer_signature); - } - - if Self::check_burn_tip_changed(sortdb, burn_tip) { - debug!("SignCoordinator: Exiting due to new burnchain tip"); - return Err(NakamotoNodeError::BurnchainTipChanged); - } - - // one of two things can happen: - // * we get enough signatures from stackerdb from the signers, OR - // * we see our block get processed in our chainstate (meaning, the signers broadcasted - // the block and our node got it and processed it) - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), 
- )) - } - }; - - // was the node asked to stop? - if !self.keep_running.load(Ordering::SeqCst) { - info!("SignerCoordinator: received node exit request. Aborting"); - return Err(NakamotoNodeError::ChannelClosed); - } - - // check to see if this event we got is a signer event - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - - let modified_slots = &event.modified_slots.clone(); - - let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. Ignoring."); - continue; - }; - let slot_ids = modified_slots - .iter() - .map(|chunk| chunk.slot_id) - .collect::>(); - - debug!("SignCoordinator: Received messages from signers"; - "count" => messages.len(), - "slot_ids" => ?slot_ids, - "threshold" => self.weight_threshold - ); - - for (message, slot_id) in messages.into_iter().zip(slot_ids) { - let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { - return Err(NakamotoNodeError::SignerSignatureError( - "Signer entry not found".into(), - )); - }; - let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) - else { - return Err(NakamotoNodeError::SignerSignatureError( - "Failed to parse signer public key".into(), - )); - }; - - if responded_signers.contains(&signer_pubkey) { - debug!( - "Signer {slot_id} already responded for block {}. 
Ignoring {message:?}.", block.header.signer_signature_hash(); - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - match message { - SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { - let BlockAccepted { - signer_signature_hash: response_hash, - signature, - metadata, - } = accepted; - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != response_hash { - warn!( - "Processed signature for a different block. Will try to continue."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "response_hash" => %response_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - "response_hash" => %response_hash, - "server_version" => %metadata.server_version - ); - continue; - } - debug!("SignCoordinator: Received valid signature from signer"; "slot_id" => slot_id, "signature" => %signature); - let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) - else { - warn!("Got invalid signature from a signer. Ignoring."); - continue; - }; - if !valid_sig { - warn!( - "Processed signature but didn't validate over the expected block. 
Ignoring"; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash, - "slot_id" => slot_id, - ); - continue; - } - - if Self::fault_injection_ignore_signatures() { - warn!("SignCoordinator: fault injection: ignoring well-formed signature for block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - continue; - } - - if !gathered_signatures.contains_key(&slot_id) { - total_weight_signed = total_weight_signed - .checked_add(signer_entry.weight) - .expect("FATAL: total weight signed exceeds u32::MAX"); - } - - info!("SignCoordinator: Signature Added to block"; - "block_signer_sighash" => %block_sighash, - "signer_pubkey" => signer_pubkey.to_hex(), - "signer_slot_id" => slot_id, - "signature" => %signature, - "signer_weight" => signer_entry.weight, - "total_weight_signed" => total_weight_signed, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id(), - "server_version" => metadata.server_version, - ); - gathered_signatures.insert(slot_id, signature); - responded_signers.insert(signer_pubkey); - } - SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { - let block_sighash = block.header.signer_signature_hash(); - if block_sighash != rejected_data.signer_signature_hash { - warn!( - "Processed rejection for a different block. 
Will try to continue."; - "block_signer_signature_hash" => %block_sighash, - "rejected_data.signer_signature_hash" => %rejected_data.signer_signature_hash, - "slot_id" => slot_id, - "reward_cycle_id" => reward_cycle_id, - ); - continue; - } - let rejected_pubkey = match rejected_data.recover_public_key() { - Ok(rejected_pubkey) => { - if rejected_pubkey != signer_pubkey { - warn!("Recovered public key from rejected data does not match signer's public key. Ignoring."); - continue; - } - rejected_pubkey - } - Err(e) => { - warn!("Failed to recover public key from rejected data: {e:?}. Ignoring."); - continue; - } - }; - responded_signers.insert(rejected_pubkey); - debug!( - "Signer {slot_id} rejected our block {}/{}", - &block.header.consensus_hash, - &block.header.block_hash() - ); - total_reject_weight = total_reject_weight - .checked_add(signer_entry.weight) - .expect("FATAL: total weight rejected exceeds u32::MAX"); - - if total_reject_weight.saturating_add(self.weight_threshold) - > self.total_weight - { - debug!( - "{total_reject_weight}/{} signers vote to reject our block {}/{}", - self.total_weight, - &block.header.consensus_hash, - &block.header.block_hash() - ); - counters.bump_naka_rejected_blocks(); - return Err(NakamotoNodeError::SignersRejected); - } - continue; - } - SignerMessageV0::BlockProposal(_) => { - debug!("Received block proposal message. Ignoring."); - continue; - } - SignerMessageV0::BlockPushed(_) => { - debug!("Received block pushed message. Ignoring."); - continue; - } - SignerMessageV0::MockSignature(_) - | SignerMessageV0::MockProposal(_) - | SignerMessageV0::MockBlock(_) => { - debug!("Received mock message. Ignoring."); - continue; - } - }; - } - // After gathering all signatures, return them if we've hit the threshold - if total_weight_signed >= self.weight_threshold { - info!("SignCoordinator: Received enough signatures. 
Continuing."; - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id() - ); - return Ok(gathered_signatures.values().cloned().collect()); - } - } - } -} diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs new file mode 100644 index 0000000000..70c9aab190 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -0,0 +1,376 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::sync::atomic::AtomicBool; +use std::sync::{Arc, Mutex}; +use std::thread::JoinHandle; + +use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; +use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::db::sortdb::SortitionDB; +use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; +use stacks::chainstate::stacks::db::StacksChainState; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::codec::StacksMessageCodec; +use stacks::libstackerdb::StackerDBChunkData; +use stacks::net::stackerdb::StackerDBs; +use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; +use stacks::util::hash::Sha512Trunc256Sum; +use stacks::util::secp256k1::MessageSignature; +use stacks::util_lib::boot::boot_code_id; + +use super::stackerdb_listener::StackerDBListenerComms; +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; +use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; +use crate::neon::Counters; +use crate::Config; + +/// The state of the signer database listener, used by the miner thread to +/// interact with the signer listener. +pub struct SignerCoordinator { + /// The private key used to sign messages from the miner + message_key: StacksPrivateKey, + /// Is this mainnet? 
+ is_mainnet: bool, + /// The session for writing to the miners contract in the stackerdb + miners_session: StackerDBSession, + /// The total weight of all signers + total_weight: u32, + /// The weight threshold for block approval + weight_threshold: u32, + /// Interface to the StackerDB listener thread's data + stackerdb_comms: StackerDBListenerComms, + /// Keep running flag for the signer DB listener thread + keep_running: Arc, + /// Handle for the signer DB listener thread + listener_thread: Option>, +} + +impl SignerCoordinator { + /// Create a new `SignerCoordinator` instance. + /// This will spawn a new thread to listen for messages from the signer DB. + pub fn new( + stackerdb_channel: Arc>, + node_keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + message_key: StacksPrivateKey, + config: &Config, + ) -> Result { + info!("SignerCoordinator: starting up"); + let keep_running = Arc::new(AtomicBool::new(true)); + + // Create the stacker DB listener + let mut listener = StackerDBListener::new( + stackerdb_channel, + node_keep_running.clone(), + keep_running.clone(), + reward_set, + burn_tip, + burnchain, + )?; + let is_mainnet = config.is_mainnet(); + let rpc_socket = config + .node + .get_rpc_loopback() + .ok_or_else(|| ChainstateError::MinerAborted)?; + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + + let mut sc = Self { + message_key, + is_mainnet, + miners_session, + total_weight: listener.total_weight, + weight_threshold: listener.weight_threshold, + stackerdb_comms: listener.get_comms(), + keep_running, + listener_thread: None, + }; + + // Spawn the signer DB listener thread + let listener_thread = std::thread::Builder::new() + .name("stackerdb_listener".to_string()) + .spawn(move || { + if let Err(e) = listener.run() { + error!("StackerDBListener: exited with error: {e:?}"); + } + }) + .map_err(|e| 
{ + error!("Failed to spawn stackerdb_listener thread: {e:?}"); + ChainstateError::MinerAborted + })?; + + sc.listener_thread = Some(listener_thread); + + Ok(sc) + } + + /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] + pub fn send_miners_message( + miner_sk: &StacksPrivateKey, + sortdb: &SortitionDB, + tip: &BlockSnapshot, + stackerdbs: &StackerDBs, + message: M, + miner_slot_id: MinerSlotID, + is_mainnet: bool, + miners_session: &mut StackerDBSession, + election_sortition: &ConsensusHash, + ) -> Result<(), String> { + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) + .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? + else { + return Err("No slot for miner".into()); + }; + + let slot_id = slot_range + .start + .saturating_add(miner_slot_id.to_u8().into()); + if !slot_range.contains(&slot_id) { + return Err("Not enough slots for miner messages".into()); + } + // Get the LAST slot version number written to the DB. If not found, use 0. + // Add 1 to get the NEXT version number + // Note: we already check above for the slot's existence + let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); + let slot_version = stackerdbs + .get_slot_version(&miners_contract_id, slot_id) + .map_err(|e| format!("Failed to read slot version: {e:?}"))? + .unwrap_or(0) + .saturating_add(1); + let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); + chunk + .sign(miner_sk) + .map_err(|_| "Failed to sign StackerDB chunk")?; + + match miners_session.put_chunk(&chunk) { + Ok(ack) => { + if ack.accepted { + debug!("Wrote message to stackerdb: {ack:?}"); + Ok(()) + } else { + Err(format!("{ack:?}")) + } + } + Err(e) => Err(format!("{e:?}")), + } + } + + /// Propose a Nakamoto block and gather signatures for it. 
+ /// This function begins by sending a `BlockProposal` message to the + /// signers, and then it waits for the signers to respond with their + /// signatures. It does so in two ways, concurrently: + /// * It waits for the signer DB listener to collect enough signatures to + /// accept or reject the block + /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. + // Mutants skip here: this function is covered via integration tests, + // which the mutation testing does not see. + #[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] + pub fn propose_block( + &mut self, + block: &NakamotoBlock, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + sortdb: &SortitionDB, + chain_state: &mut StacksChainState, + stackerdbs: &StackerDBs, + counters: &Counters, + election_sortition: &ConsensusHash, + ) -> Result, NakamotoNodeError> { + // Add this block to the block status map. 
+ self.stackerdb_comms.insert_block(&block.header); + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + + let block_proposal = BlockProposal { + block: block.clone(), + burn_height: burn_tip.block_height, + reward_cycle: reward_cycle_id, + }; + + let block_proposal_message = SignerMessageV0::BlockProposal(block_proposal); + debug!("Sending block proposal message to signers"; + "signer_signature_hash" => %block.header.signer_signature_hash(), + ); + Self::send_miners_message::( + &self.message_key, + sortdb, + burn_tip, + stackerdbs, + block_proposal_message, + MinerSlotID::BlockProposal, + self.is_mainnet, + &mut self.miners_session, + election_sortition, + ) + .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; + counters.bump_naka_proposed_blocks(); + + #[cfg(test)] + { + info!( + "SignerCoordinator: sent block proposal to .miners, waiting for test signing channel" + ); + // In test mode, short-circuit waiting for the signers if the TEST_SIGNING + // channel has been created. This allows integration tests for the stacks-node + // independent of the stacks-signer. + if let Some(signatures) = + crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() + { + debug!("Short-circuiting waiting for signers, using test signature"); + return Ok(signatures); + } + } + + self.get_block_status( + &block.header.signer_signature_hash(), + &block.block_id(), + chain_state, + sortdb, + burn_tip, + counters, + ) + } + + /// Get the block status for a given block hash. + /// If we have not yet received enough signatures for this block, this + /// method will block until we do. If this block shows up in the staging DB + /// before we have enough signatures, we will return the signatures from + /// there. If a new burnchain tip is detected, we will return an error. 
+ fn get_block_status( + &self, + block_signer_sighash: &Sha512Trunc256Sum, + block_id: &StacksBlockId, + chain_state: &mut StacksChainState, + sortdb: &SortitionDB, + burn_tip: &BlockSnapshot, + counters: &Counters, + ) -> Result, NakamotoNodeError> { + loop { + let block_status = match self.stackerdb_comms.wait_for_block_status( + block_signer_sighash, + EVENT_RECEIVER_POLL, + |status| { + status.total_weight_signed < self.weight_threshold + && status + .total_reject_weight + .saturating_add(self.weight_threshold) + <= self.total_weight + }, + )? { + Some(status) => status, + None => { + // If we just received a timeout, we should check if the burnchain + // tip has changed or if we received this signed block already in + // the staging db. + debug!("SignerCoordinator: Timeout waiting for block signatures"); + + // Look in the nakamoto staging db -- a block can only get stored there + // if it has enough signing weight to clear the threshold. + if let Ok(Some((stored_block, _sz))) = chain_state + .nakamoto_blocks_db() + .get_nakamoto_block(block_id) + .map_err(|e| { + warn!( + "Failed to query chainstate for block: {e:?}"; + "block_id" => %block_id, + "block_signer_sighash" => %block_signer_sighash, + ); + e + }) + { + debug!("SignCoordinator: Found signatures in relayed block"); + counters.bump_naka_signer_pushed_blocks(); + return Ok(stored_block.header.signer_signature); + } + + if Self::check_burn_tip_changed(sortdb, burn_tip) { + debug!("SignCoordinator: Exiting due to new burnchain tip"); + return Err(NakamotoNodeError::BurnchainTipChanged); + } + + continue; + } + }; + + if block_status + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + info!( + "{}/{} signers vote to reject block", + block_status.total_reject_weight, self.total_weight; + "block_signer_sighash" => %block_signer_sighash, + ); + counters.bump_naka_rejected_blocks(); + return Err(NakamotoNodeError::SignersRejected); + } else if 
block_status.total_weight_signed >= self.weight_threshold { + info!("Received enough signatures, block accepted"; + "block_signer_sighash" => %block_signer_sighash, + ); + return Ok(block_status.gathered_signatures.values().cloned().collect()); + } else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Unblocked without reaching the threshold".into(), + )); + } + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. + pub fn get_tenure_extend_timestamp(&self) -> u64 { + self.stackerdb_comms + .get_tenure_extend_timestamp(self.weight_threshold) + } + + /// Check if the tenure needs to change + fn check_burn_tip_changed(sortdb: &SortitionDB, burn_block: &BlockSnapshot) -> bool { + let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if cur_burn_chain_tip.consensus_hash != burn_block.consensus_hash { + info!("SignerCoordinator: Cancel signature aggregation; burnchain tip has changed"); + true + } else { + false + } + } + + pub fn shutdown(&mut self) { + if let Some(listener_thread) = self.listener_thread.take() { + info!("SignerCoordinator: shutting down stacker db listener thread"); + self.keep_running + .store(false, std::sync::atomic::Ordering::Relaxed); + if let Err(e) = listener_thread.join() { + error!("Failed to join signer listener thread: {e:?}"); + } + debug!("SignerCoordinator: stacker db listener thread has shut down"); + } + } +} diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs new file mode 100644 index 0000000000..00c21ec003 --- /dev/null +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -0,0 +1,556 @@ +// Copyright (C) 2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it 
under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::collections::BTreeMap; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc::Receiver; +#[cfg(test)] +use std::sync::LazyLock; +use std::sync::{Arc, Condvar, Mutex}; +use std::time::Duration; + +use hashbrown::{HashMap, HashSet}; +use libsigner::v0::messages::{BlockAccepted, BlockResponse, SignerMessage as SignerMessageV0}; +use libsigner::SignerEvent; +use stacks::burnchains::Burnchain; +use stacks::chainstate::burn::BlockSnapshot; +use stacks::chainstate::nakamoto::NakamotoBlockHeader; +use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, SIGNERS_NAME}; +use stacks::chainstate::stacks::events::StackerDBChunksEvent; +use stacks::chainstate::stacks::Error as ChainstateError; +use stacks::types::chainstate::StacksPublicKey; +use stacks::types::PublicKey; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::{MerkleHashFunc, Sha512Trunc256Sum}; +use stacks::util::secp256k1::MessageSignature; + +use super::Error as NakamotoNodeError; +use crate::event_dispatcher::StackerDBChannel; +#[cfg(test)] +use crate::neon::TestFlag; + +#[cfg(test)] +/// Fault injection flag to prevent the miner from seeing enough signer signatures. 
+/// Used to test that the signers will broadcast a block if it gets enough signatures +pub static TEST_IGNORE_SIGNERS: LazyLock = LazyLock::new(TestFlag::default); + +/// How long should the coordinator poll on the event receiver before +/// waking up to check timeouts? +pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); + +#[derive(Debug, Clone)] +pub struct BlockStatus { + pub responded_signers: HashSet, + pub gathered_signatures: BTreeMap, + pub total_weight_signed: u32, + pub total_reject_weight: u32, +} + +#[derive(Debug, Clone)] +pub(crate) struct TimestampInfo { + pub timestamp: u64, + pub weight: u32, +} + +/// The listener for the StackerDB, which listens for messages from the +/// signers and tracks the state of block signatures and idle timestamps. +pub struct StackerDBListener { + /// Channel to communicate with StackerDB + stackerdb_channel: Arc>, + /// Receiver end of the StackerDB events channel + receiver: Option>, + /// Flag to shut the node down + node_keep_running: Arc, + /// Flag to shut the listener down + keep_running: Arc, + /// The signer set for this tenure (0 or 1) + signer_set: u32, + /// The total weight of all signers + pub(crate) total_weight: u32, + /// The weight threshold for block approval + pub(crate) weight_threshold: u32, + /// The signer entries for this tenure (keyed by slot_id) + signer_entries: HashMap, + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: BlockStatus + pub(crate) blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + pub(crate) signer_idle_timestamps: Arc>>, +} + +/// Interface for other threads to retrieve info from the StackerDBListener +pub struct StackerDBListenerComms { + /// Tracks signatures for blocks + /// - key: Sha512Trunc256Sum (signer signature hash) + /// - value: 
BlockStatus + blocks: Arc<(Mutex>, Condvar)>, + /// Tracks the timestamps from signers to decide when they should be + /// willing to accept time-based tenure extensions + /// - key: StacksPublicKey + /// - value: TimestampInfo + signer_idle_timestamps: Arc>>, +} + +impl StackerDBListener { + pub fn new( + stackerdb_channel: Arc>, + node_keep_running: Arc, + keep_running: Arc, + reward_set: &RewardSet, + burn_tip: &BlockSnapshot, + burnchain: &Burnchain, + ) -> Result { + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); + if replaced_other { + warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); + } + + let total_weight = reward_set.total_signing_weight().map_err(|e| { + warn!("Failed to calculate total weight for the reward set: {e:?}"); + ChainstateError::NoRegisteredSigners(0) + })?; + + let weight_threshold = NakamotoBlockHeader::compute_voting_weight_threshold(total_weight)?; + + let reward_cycle_id = burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: tried to initialize coordinator before first burn block height"); + let signer_set = + u32::try_from(reward_cycle_id % 2).expect("FATAL: reward cycle id % 2 exceeds u32"); + + let Some(ref reward_set_signers) = reward_set.signers else { + error!("Could not initialize signing coordinator for reward set without signer"); + debug!("reward set: {reward_set:?}"); + return Err(ChainstateError::NoRegisteredSigners(0)); + }; + + let signer_entries = reward_set_signers + .iter() + .cloned() + .enumerate() + .map(|(idx, signer)| { + let Ok(slot_id) = u32::try_from(idx) else { + return Err(ChainstateError::InvalidStacksBlock( + "Signer index exceeds u32".into(), + )); + }; + Ok((slot_id, signer)) + }) + .collect::, ChainstateError>>()?; + + Ok(Self { + stackerdb_channel, + receiver: Some(receiver), + node_keep_running, + keep_running, + signer_set, + 
total_weight, + weight_threshold, + signer_entries, + blocks: Arc::new((Mutex::new(HashMap::new()), Condvar::new())), + signer_idle_timestamps: Arc::new(Mutex::new(HashMap::new())), + }) + } + + pub fn get_comms(&self) -> StackerDBListenerComms { + StackerDBListenerComms { + blocks: self.blocks.clone(), + signer_idle_timestamps: self.signer_idle_timestamps.clone(), + } + } + + /// Run the StackerDB listener. + pub fn run(&mut self) -> Result<(), NakamotoNodeError> { + info!("StackerDBListener: Starting up"); + + let Some(receiver) = &self.receiver else { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDBListener: Failed to obtain the StackerDB event receiver".into(), + )); + }; + + loop { + // was the node asked to stop? + if !self.node_keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received node exit request. Aborting"); + return Ok(()); + } + + // was the listener asked to stop? + if !self.keep_running.load(Ordering::SeqCst) { + info!("StackerDBListener: received listener exit request. Aborting"); + return Ok(()); + } + + let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { + Ok(event) => event, + Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { + debug!("StackerDBListener: No StackerDB event received. 
Checking flags and polling again."); + continue; + } + Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { + warn!("StackerDBListener: StackerDB event receiver disconnected"); + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "StackerDB event receiver disconnected".into(), + )); + } + }; + + // check to see if this event we got is a signer event + let is_signer_event = + event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); + + if !is_signer_event { + debug!("StackerDBListener: Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); + continue; + } + + let modified_slots = &event.modified_slots.clone(); + + let Ok(signer_event) = SignerEvent::::try_from(event).map_err(|e| { + warn!("StackerDBListener: Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); + }) else { + continue; + }; + let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { + debug!("StackerDBListener: Received signer event other than a signer message. Ignoring."); + continue; + }; + if signer_set != self.signer_set { + debug!( + "StackerDBListener: Received signer event for other reward cycle. Ignoring." 
+ ); + continue; + }; + let slot_ids = modified_slots + .into_iter() + .map(|chunk| chunk.slot_id) + .collect::>(); + + debug!("StackerDBListener: Received messages from signers"; + "count" => messages.len(), + "slot_ids" => ?slot_ids, + ); + + for (message, slot_id) in messages.into_iter().zip(slot_ids) { + let Some(signer_entry) = &self.signer_entries.get(&slot_id) else { + return Err(NakamotoNodeError::SignerSignatureError( + "Signer entry not found".into(), + )); + }; + let Ok(signer_pubkey) = StacksPublicKey::from_slice(&signer_entry.signing_key) + else { + return Err(NakamotoNodeError::SignerSignatureError( + "Failed to parse signer public key".into(), + )); + }; + + match message { + SignerMessageV0::BlockResponse(BlockResponse::Accepted(accepted)) => { + let BlockAccepted { + signer_signature_hash: block_sighash, + signature, + metadata, + response_data, + } = accepted; + let tenure_extend_timestamp = response_data.tenure_extend_timestamp; + + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let Some(block) = blocks.get_mut(&block_sighash) else { + info!( + "StackerDBListener: Received signature for block that we did not request. Ignoring."; + "signature" => %signature, + "block_signer_sighash" => %block_sighash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + }; + + let Ok(valid_sig) = signer_pubkey.verify(block_sighash.bits(), &signature) + else { + warn!( + "StackerDBListener: Got invalid signature from a signer. Ignoring." + ); + continue; + }; + if !valid_sig { + warn!( + "StackerDBListener: Processed signature but didn't validate over the expected block. 
Ignoring"; + "signature" => %signature, + "block_signer_signature_hash" => %block_sighash, + "slot_id" => slot_id, + ); + continue; + } + + if Self::fault_injection_ignore_signatures() { + warn!("StackerDBListener: fault injection: ignoring well-formed signature for block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + ); + continue; + } + + if !block.gathered_signatures.contains_key(&slot_id) { + block.total_weight_signed = block + .total_weight_signed + .checked_add(signer_entry.weight) + .expect("FATAL: total weight signed exceeds u32::MAX"); + } + + info!("StackerDBListener: Signature Added to block"; + "block_signer_sighash" => %block_sighash, + "signer_pubkey" => signer_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "tenure_extend_timestamp" => tenure_extend_timestamp, + "server_version" => metadata.server_version, + ); + block.gathered_signatures.insert(slot_id, signature); + block.responded_signers.insert(signer_pubkey); + + if block.total_weight_signed >= self.weight_threshold { + // Signal to anyone waiting on this block that we have enough signatures + cvar.notify_all(); + } + + // Update the idle timestamp for this signer + self.update_idle_timestamp( + signer_pubkey, + tenure_extend_timestamp, + signer_entry.weight, + ); + } + SignerMessageV0::BlockResponse(BlockResponse::Rejected(rejected_data)) => { + let (lock, cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let Some(block) = blocks.get_mut(&rejected_data.signer_signature_hash) + else { + info!( + "StackerDBListener: Received rejection for block that we did not request. 
Ignoring."; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "slot_id" => slot_id, + "signer_set" => self.signer_set, + ); + continue; + }; + + let rejected_pubkey = match rejected_data.recover_public_key() { + Ok(rejected_pubkey) => { + if rejected_pubkey != signer_pubkey { + warn!("StackerDBListener: Recovered public key from rejected data does not match signer's public key. Ignoring."); + continue; + } + rejected_pubkey + } + Err(e) => { + warn!("StackerDBListener: Failed to recover public key from rejected data: {e:?}. Ignoring."); + continue; + } + }; + block.responded_signers.insert(rejected_pubkey); + block.total_reject_weight = block + .total_reject_weight + .checked_add(signer_entry.weight) + .expect("FATAL: total weight rejected exceeds u32::MAX"); + + info!("StackerDBListener: Signer rejected block"; + "block_signer_sighash" => %rejected_data.signer_signature_hash, + "signer_pubkey" => rejected_pubkey.to_hex(), + "signer_slot_id" => slot_id, + "signature" => %rejected_data.signature, + "signer_weight" => signer_entry.weight, + "total_weight_signed" => block.total_weight_signed, + "reason" => rejected_data.reason, + "reason_code" => %rejected_data.reason_code, + "tenure_extend_timestamp" => rejected_data.response_data.tenure_extend_timestamp, + "server_version" => rejected_data.metadata.server_version, + ); + + if block + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + // Signal to anyone waiting on this block that we have enough rejections + cvar.notify_all(); + } + + // Update the idle timestamp for this signer + self.update_idle_timestamp( + signer_pubkey, + rejected_data.response_data.tenure_extend_timestamp, + signer_entry.weight, + ); + } + SignerMessageV0::BlockProposal(_) => { + debug!("Received block proposal message. Ignoring."); + } + SignerMessageV0::BlockPushed(_) => { + debug!("Received block pushed message. 
Ignoring."); + } + SignerMessageV0::MockSignature(_) + | SignerMessageV0::MockProposal(_) + | SignerMessageV0::MockBlock(_) => { + debug!("Received mock message. Ignoring."); + } + }; + } + } + } + + fn update_idle_timestamp(&self, signer_pubkey: StacksPublicKey, timestamp: u64, weight: u32) { + let mut idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock idle timestamps"); + + // Check the current timestamp for the given signer_pubkey + if let Some(existing_info) = idle_timestamps.get(&signer_pubkey) { + // Only update if the new timestamp is greater + if timestamp <= existing_info.timestamp { + return; // Exit early if the new timestamp is not greater + } + } + + // Update the map with the new timestamp and weight + let timestamp_info = TimestampInfo { timestamp, weight }; + idle_timestamps.insert(signer_pubkey, timestamp_info); + } + + /// Do we ignore signer signatures? + #[cfg(test)] + fn fault_injection_ignore_signatures() -> bool { + TEST_IGNORE_SIGNERS.get() + } + + #[cfg(not(test))] + fn fault_injection_ignore_signatures() -> bool { + false + } +} + +impl Drop for StackerDBListener { + fn drop(&mut self) { + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock stackerdb channel"); + stackerdb_channel.replace_receiver(self.receiver.take().expect( + "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", + )); + } +} + +impl StackerDBListenerComms { + /// Insert a block into the block status map with initial values. 
+ pub fn insert_block(&self, block: &NakamotoBlockHeader) { + let (lock, _cvar) = &*self.blocks; + let mut blocks = lock.lock().expect("FATAL: failed to lock block status"); + let block_status = BlockStatus { + responded_signers: HashSet::new(), + gathered_signatures: BTreeMap::new(), + total_weight_signed: 0, + total_reject_weight: 0, + }; + blocks.insert(block.signer_signature_hash(), block_status); + } + + /// Get the status for `block` from the Stacker DB listener. + /// If the block is not found in the map, return an error. + /// If the block is found, call `condition` to check if the block status + /// satisfies the condition. + /// If the condition is satisfied, return the block status as + /// `Ok(Some(status))`. + /// If the condition is not satisfied, wait for it to be satisfied. + /// If the timeout is reached, return `Ok(None)`. + pub fn wait_for_block_status( + &self, + block_signer_sighash: &Sha512Trunc256Sum, + timeout: Duration, + condition: F, + ) -> Result, NakamotoNodeError> + where + F: Fn(&BlockStatus) -> bool, + { + let (lock, cvar) = &*self.blocks; + let blocks = lock.lock().expect("FATAL: failed to lock block status"); + + let (guard, timeout_result) = cvar + .wait_timeout_while(blocks, timeout, |map| { + let Some(status) = map.get(block_signer_sighash) else { + return true; + }; + condition(status) + }) + .expect("FATAL: failed to wait on block status cond var"); + + // If we timed out, return None + if timeout_result.timed_out() { + return Ok(None); + } + match guard.get(block_signer_sighash) { + Some(status) => Ok(Some(status.clone())), + None => Err(NakamotoNodeError::SigningCoordinatorFailure( + "Block not found in status map".into(), + )), + } + } + + /// Get the timestamp at which at least 70% of the signing power should be + /// willing to accept a time-based tenure extension. 
+ pub fn get_tenure_extend_timestamp(&self, weight_threshold: u32) -> u64 { + let signer_idle_timestamps = self + .signer_idle_timestamps + .lock() + .expect("FATAL: failed to lock signer idle timestamps"); + debug!("SignerCoordinator: signer_idle_timestamps: {signer_idle_timestamps:?}"); + let mut idle_timestamps = signer_idle_timestamps.values().collect::>(); + idle_timestamps.sort_by_key(|info| info.timestamp); + let mut weight_sum = 0; + for info in idle_timestamps { + weight_sum += info.weight; + if weight_sum >= weight_threshold { + debug!("SignerCoordinator: 70% threshold reached for tenure extension timestamp"; + "tenure_extend_timestamp" => info.timestamp, + "tenure_extend_in" => (info.timestamp as i64 - get_epoch_time_secs() as i64) + ); + return info.timestamp; + } + } + + // We don't have enough information to reach a 70% threshold at any + // time, so return u64::MAX to indicate that we should not extend the + // tenure. + u64::MAX + } +} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index b688db100d..d2143f4986 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -223,7 +223,7 @@ use crate::burnchains::{make_bitcoin_indexer, Error as BurnchainControllerError} use crate::chain_data::MinerStats; use crate::config::NodeConfig; use crate::globals::{NeonGlobals as Globals, RelayerDirective}; -use crate::nakamoto_node::sign_coordinator::SignCoordinator; +use crate::nakamoto_node::signer_coordinator::SignerCoordinator; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; @@ -611,8 +611,7 @@ impl MicroblockMinerThread { match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { Ok(Some(_)) => { let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - let cost_so_far = if relayer_thread.microblock_stream_cost == ExecutionCost::zero() - { + let cost_so_far = if 
relayer_thread.microblock_stream_cost == ExecutionCost::ZERO { // unknown cost, or this is idempotent. StacksChainState::get_stacks_block_anchored_cost( chainstate.db(), @@ -2364,7 +2363,7 @@ impl BlockMinerThread { let mut miners_stackerdb = StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2392,7 +2391,7 @@ impl BlockMinerThread { }; info!("Sending mock block to stackerdb: {mock_block:?}"); - SignCoordinator::send_miners_message( + SignerCoordinator::send_miners_message( &mining_key, &burn_db, &self.burn_block, @@ -2845,7 +2844,7 @@ impl RelayerThread { miner_tip: None, last_microblock_tenure_time: 0, microblock_deadline: 0, - microblock_stream_cost: ExecutionCost::zero(), + microblock_stream_cost: ExecutionCost::ZERO, relayer, @@ -3503,7 +3502,7 @@ impl RelayerThread { if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); - self.microblock_stream_cost = ExecutionCost::zero(); + self.microblock_stream_cost = ExecutionCost::ZERO; } self.miner_tip = best_tip; } diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 7990c04332..819ace144c 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -199,8 +199,8 @@ pub fn announce_boot_receipts( block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, - &ExecutionCost::zero(), - &ExecutionCost::zero(), + &ExecutionCost::ZERO, + &ExecutionCost::ZERO, pox_constants, &None, &None, diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 5e021e50ab..b2171b4e8b 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -73,6 +73,18 @@ impl Default 
for RunLoopCounter { } } +impl RunLoopCounter { + #[cfg(test)] + pub fn get(&self) -> u64 { + self.0.load(Ordering::SeqCst) + } + + #[cfg(test)] + pub fn load(&self, ordering: Ordering) -> u64 { + self.0.load(ordering) + } +} + #[cfg(test)] impl std::ops::Deref for RunLoopCounter { type Target = Arc; diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index b305a7429a..1964612bd4 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -1123,8 +1123,8 @@ fn bigger_microblock_streams_in_2_05() { sleep_ms(120_000); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut epoch_20_stream_cost = ExecutionCost::zero(); - let mut epoch_205_stream_cost = ExecutionCost::zero(); + let mut epoch_20_stream_cost = ExecutionCost::ZERO; + let mut epoch_205_stream_cost = ExecutionCost::ZERO; // max == largest number of transactions per stream in a given epoch (2.0 or 2.05) // total == number of transactions across all streams in a given epoch (2.0 or 2.05) @@ -1155,7 +1155,7 @@ fn bigger_microblock_streams_in_2_05() { eprintln!("{}", transactions.len()); let mut num_big_microblock_txs = 0; - let mut total_execution_cost = ExecutionCost::zero(); + let mut total_execution_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -1204,7 +1204,7 @@ fn bigger_microblock_streams_in_2_05() { epoch_20_stream_cost = total_execution_cost; break; } - if in_205 && total_execution_cost.exceeds(&ExecutionCost::zero()) { + if in_205 && total_execution_cost.exceeds(&ExecutionCost::ZERO) { have_confirmed_205_stream = true; epoch_205_stream_cost = total_execution_cost; break; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 25d22801c0..281feae99a 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ 
b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -6373,18 +6373,12 @@ fn signer_chainstate() { ) .unwrap(); - let reward_cycle = burnchain - .block_height_to_reward_cycle( - SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height, - ) - .unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6400,7 +6394,6 @@ fn signer_chainstate() { &mut signer_db, prior_tenure_first, miner_pk, - reward_cycle, true, ) .unwrap(); @@ -6410,14 +6403,7 @@ fn signer_chainstate() { ); for block in prior_tenure_interims.iter() { let valid = sortitions_view - .check_proposal( - &signer_client, - &mut signer_db, - block, - miner_pk, - reward_cycle, - true, - ) + .check_proposal(&signer_client, &mut signer_db, block, miner_pk, true) .unwrap(); assert!( !valid, @@ -6452,7 +6438,6 @@ fn signer_chainstate() { &mut signer_db, &proposal.0, &proposal.1, - reward_cycle, true, ) .unwrap(); @@ -6474,6 +6459,7 @@ fn signer_chainstate() { signed_group: None, ext: ExtraBlockInfo::None, state: BlockState::Unprocessed, + validation_time_ms: None, }) .unwrap(); @@ -6508,7 +6494,6 @@ fn signer_chainstate() { &mut signer_db, &proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6524,6 +6509,7 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6539,7 +6525,6 @@ fn 
signer_chainstate() { &mut signer_db, &proposal_interim.0, &proposal_interim.1, - reward_cycle, true, ) .unwrap(); @@ -6562,6 +6547,7 @@ fn signer_chainstate() { signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, state: BlockState::GloballyAccepted, + validation_time_ms: Some(1000), }) .unwrap(); @@ -6602,14 +6588,9 @@ fn signer_chainstate() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); - let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .unwrap() - .block_height; - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_block_height) - .unwrap(); assert!( !sortitions_view .check_proposal( @@ -6617,7 +6598,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6675,7 +6655,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6739,7 +6718,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -6805,7 +6783,6 @@ fn signer_chainstate() { &mut signer_db, &sibling_block, &miner_pk, - reward_cycle, false, ) .unwrap(), @@ -9658,8 +9635,6 @@ fn test_shadow_recovery() { let coord_channel = signer_test.running_nodes.coord_channel.clone(); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let burnchain = naka_conf.get_burnchain(); - // make another tenure next_block_and_mine_commit( btc_regtest_controller, diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 7d9f1f0dc8..e3d592d23c 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ 
b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -5766,7 +5766,7 @@ fn cost_voting_integration() { let transactions = block.get("transactions").unwrap().as_array().unwrap(); eprintln!("{}", transactions.len()); let mut tested = false; - let mut exec_cost = ExecutionCost::zero(); + let mut exec_cost = ExecutionCost::ZERO; for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 946a566c13..ff128d0a03 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -36,7 +36,9 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::types::PrincipalData; -use libsigner::v0::messages::{BlockResponse, SignerMessage}; +use libsigner::v0::messages::{ + BlockAccepted, BlockResponse, MessageSlotID, PeerInfo, SignerMessage, +}; use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -53,14 +55,14 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; +use stacks_signer::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; use super::nakamoto_integrations::{check_nakamoto_empty_block_heuristics, wait_for}; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; -use crate::neon::{Counters, TestFlag}; +use crate::neon::{Counters, RunLoopCounter, TestFlag}; use 
crate::run_loop::boot_nakamoto; use crate::tests::bitcoin_regtest::BitcoinCoreController; use crate::tests::nakamoto_integrations::{ @@ -81,13 +83,13 @@ pub struct RunningNodes { pub btcd_controller: BitcoinCoreController, pub run_loop_thread: thread::JoinHandle<()>, pub run_loop_stopper: Arc, - pub vrfs_submitted: Arc, - pub commits_submitted: Arc, - pub blocks_processed: Arc, - pub nakamoto_blocks_proposed: Arc, - pub nakamoto_blocks_mined: Arc, - pub nakamoto_blocks_rejected: Arc, - pub nakamoto_blocks_signer_pushed: Arc, + pub vrfs_submitted: RunLoopCounter, + pub commits_submitted: RunLoopCounter, + pub blocks_processed: RunLoopCounter, + pub nakamoto_blocks_proposed: RunLoopCounter, + pub nakamoto_blocks_mined: RunLoopCounter, + pub nakamoto_blocks_rejected: RunLoopCounter, + pub nakamoto_blocks_signer_pushed: RunLoopCounter, pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, @@ -307,10 +309,12 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest info_before.stacks_tip_height) + let info_after = self.get_peer_info(); + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height + && blocks_mined > mined_before) }) .unwrap(); let mined_block_elapsed_time = mined_block_time.elapsed(); @@ -355,6 +361,26 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()) { + let blocks_before = self.running_nodes.nakamoto_blocks_mined.get(); + let info_before = self.get_peer_info(); + + f(); + + // Verify that the block was mined + wait_for(timeout_secs, || { + let blocks_mined = self.running_nodes.nakamoto_blocks_mined.get(); + let info = self.get_peer_info(); + Ok(blocks_mined > blocks_before + && info.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for nakamoto block to be mined"); + } + /// Wait for a confirmed block and return a list 
of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -618,6 +644,45 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { + let mut stackerdb = StackerDB::new( + &self.running_nodes.conf.node.rpc_bind, + StacksPrivateKey::new(), // We are just reading so don't care what the key is + false, + self.get_current_reward_cycle(), + SignerSlotID(0), // We are just reading so again, don't care about index. + ); + let latest_msgs = StackerDB::get_messages( + stackerdb + .get_session_mut(&MessageSlotID::BlockResponse) + .expect("Failed to get BlockResponse stackerdb session"), + &[slot_id], + ) + .expect("Failed to get message from stackerdb"); + let latest_msg = latest_msgs.last().unwrap(); + let SignerMessage::BlockResponse(block_response) = latest_msg else { + panic!("Latest message from slot #{slot_id} isn't a block acceptance"); + }; + block_response.clone() + } + + /// Get the latest block acceptance from the given slot + pub fn get_latest_block_acceptance(&self, slot_id: u32) -> BlockAccepted { + let block_response = self.get_latest_block_response(slot_id); + match block_response { + BlockResponse::Accepted(accepted) => accepted, + _ => panic!("Latest block response from slot #{slot_id} isn't a block acceptance"), + } + } + + /// Get /v2/info from the node + pub fn get_peer_info(&self) -> PeerInfo { + self.stacks_client + .get_peer_info() + .expect("Failed to get peer info") + } } fn setup_stx_btc_node( @@ -747,13 +812,13 @@ fn setup_stx_btc_node( btc_regtest_controller, run_loop_thread, run_loop_stopper, - vrfs_submitted: vrfs_submitted.0, - commits_submitted: commits_submitted.0, - blocks_processed: blocks_processed.0, - nakamoto_blocks_proposed: naka_blocks_proposed.0, - nakamoto_blocks_mined: naka_blocks_mined.0, - nakamoto_blocks_rejected: naka_blocks_rejected.0, - nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks.0, + vrfs_submitted, + commits_submitted, + blocks_processed, + nakamoto_blocks_proposed: 
naka_blocks_proposed, + nakamoto_blocks_mined: naka_blocks_mined, + nakamoto_blocks_rejected: naka_blocks_rejected, + nakamoto_blocks_signer_pushed: naka_signer_pushed_blocks, nakamoto_test_skip_commit_op, coord_channel, conf: naka_conf, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9c9fee200f..df20fc0087 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -39,11 +39,14 @@ use stacks::codec::StacksMessageCodec; use stacks::core::{StacksEpochId, CHAIN_ID_TESTNET}; use stacks::libstackerdb::StackerDBChunkData; use stacks::net::api::getsigner::GetSignerResponse; -use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STALL}; +use stacks::net::api::postblock_proposal::{ + ValidateRejectCode, TEST_VALIDATE_DELAY_DURATION_SECS, TEST_VALIDATE_STALL, +}; use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; +use stacks::util::get_epoch_time_secs; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc, Sha512Trunc256Sum}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -69,7 +72,7 @@ use crate::event_dispatcher::MinedNakamotoBlockEvent; use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; -use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; +use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ @@ -81,7 +84,9 @@ use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, 
run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, }; -use crate::tests::{self, gen_random_port, make_stacks_transfer}; +use crate::tests::{ + self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, +}; use crate::{nakamoto_node, BitcoinRegtestController, BurnchainController, Config, Keychain}; impl SignerTest { @@ -477,6 +482,7 @@ fn block_proposal_rejection() { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), tenure_last_block_proposal_timeout: Duration::from_secs(30), + tenure_idle_timeout: Duration::from_secs(300), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -529,7 +535,10 @@ fn block_proposal_rejection() { { if signer_signature_hash == block_signer_signature_hash_1 { found_signer_signature_hash_1 = true; - assert!(matches!(reason_code, RejectCode::SortitionViewMismatch)); + assert!( + matches!(reason_code, RejectCode::SortitionViewMismatch), + "Expected sortition view mismatch rejection. 
Got: {reason_code}" + ); } else if signer_signature_hash == block_signer_signature_hash_2 { found_signer_signature_hash_2 = true; assert!(matches!( @@ -1918,10 +1927,15 @@ fn miner_forking() { info!("Flushing any pending commits to enable custom winner selection"); let burn_height_before = get_burn_height(); + let blocks_before = test_observer::get_blocks().len(); + let nakamoto_blocks_count_before = get_nakamoto_headers(&conf).len(); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 30, - || Ok(get_burn_height() > burn_height_before), + || { + Ok(get_burn_height() > burn_height_before + && test_observer::get_blocks().len() > blocks_before) + }, ) .unwrap(); @@ -2117,11 +2131,14 @@ fn miner_forking() { }) .expect("Timed out waiting for miner 1 to RBF its old commit op"); + let blocks_before = test_observer::get_blocks().len(); info!("Mine RL1 Tenure"); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_blocks().len() > blocks_before), + ) + .unwrap(); // fetch the current sortition info let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); @@ -2159,14 +2176,16 @@ fn miner_forking() { let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + info!("Nakamoto blocks count before test: {nakamoto_blocks_count_before}, Nakamoto blocks count now: {nakamoto_blocks_count}"); assert_eq!(peer_1_height, peer_2_height); let nakamoto_blocks_count = get_nakamoto_headers(&conf).len(); assert_eq!( peer_1_height - pre_nakamoto_peer_1_height, - u64::try_from(nakamoto_blocks_count).unwrap() - 1, // subtract 1 for the first Nakamoto 
block + u64::try_from(nakamoto_blocks_count - nakamoto_blocks_count_before).unwrap(), // subtract 1 for the first Nakamoto block "There should be no forks in this test" ); @@ -2509,7 +2528,7 @@ fn signers_broadcast_signed_blocks() { }) .expect("Timed out waiting for first nakamoto block to be mined"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); let blocks_before = signer_test .running_nodes .nakamoto_blocks_mined @@ -2558,6 +2577,441 @@ fn signers_broadcast_signed_blocks() { signer_test.shutdown(); } +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction after the idle timeout is reached. +fn tenure_extend_after_idle() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a 
tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Verify that Nakamoto blocks that don't modify the tenure's execution cost +/// don't modify the idle timeout. +fn stx_transfers_dont_effect_idle_timeout() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 5; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(60); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * num_txs)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + // Add a delay to the block validation process + TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(5); + + let info_before = signer_test.get_peer_info(); + let blocks_before = signer_test.running_nodes.nakamoto_blocks_mined.get(); + info!("---- Nakamoto booted, starting test ----"; + "info_height" => info_before.stacks_tip_height, + "blocks_before" => blocks_before, + ); + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + info!("---- Getting current idle timeout ----"); + + let reward_cycle = signer_test.get_current_reward_cycle(); + + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + assert_eq!(signer_slot_ids.len(), num_signers); + + let get_last_block_hash = || { + let blocks = 
test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let last_block_hash = get_last_block_hash(); + + let slot_id = 0_u32; + + let initial_acceptance = signer_test.get_latest_block_acceptance(slot_id); + assert_eq!(initial_acceptance.signer_signature_hash, last_block_hash); + + info!( + "---- Last idle timeout: {} ----", + initial_acceptance.response_data.tenure_extend_timestamp + ); + + // Now, mine a few nakamoto blocks with just transfers + + let mut sender_nonce = 0; + + // Note that this response was BEFORE the block was globally accepted. it will report a guestimated idle time + let initial_acceptance = initial_acceptance; + let mut first_global_acceptance = None; + for i in 0..num_txs { + info!("---- Mining interim block {} ----", i + 1); + signer_test.wait_for_nakamoto_block(30, || { + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; + }); + + let latest_acceptance = signer_test.get_latest_block_acceptance(slot_id); + let last_block_hash = get_last_block_hash(); + + assert_eq!(latest_acceptance.signer_signature_hash, last_block_hash); + + if first_global_acceptance.is_none() { + assert!(latest_acceptance.response_data.tenure_extend_timestamp < initial_acceptance.response_data.tenure_extend_timestamp, "First global acceptance should be less than initial guesstimated acceptance as its based on block proposal time rather than epoch time at time of response."); + first_global_acceptance = Some(latest_acceptance); + } else { + // Because the block only contains transfers, the idle timeout should not have changed between blocks post the tenure change + assert_eq!( + latest_acceptance.response_data.tenure_extend_timestamp, + 
first_global_acceptance + .as_ref() + .map(|acceptance| acceptance.response_data.tenure_extend_timestamp) + .unwrap() + ); + }; + } + + info!("---- Waiting for a tenure extend ----"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Verify that a tenure extend will occur after an idle timeout +/// while actively mining. +fn idle_tenure_extend_active_mining() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let deployer_sk = Secp256k1PrivateKey::new(); + let deployer_addr = tests::to_addr(&deployer_sk); + let send_amt = 100; + let send_fee = 180; + let num_txs = 5; + let num_naka_blocks = 5; + let tenure_count = 2; + let tx_fee = 10000; + let deploy_fee = 190200; + let amount = + deploy_fee + tx_fee * num_txs * tenure_count * num_naka_blocks * 100 + 100 * tenure_count; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let idle_timeout = Duration::from_secs(60); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, amount), (deployer_addr, amount)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + let mut sender_nonces: HashMap = HashMap::new(); + + let get_and_increment_nonce = + |sender_sk: &Secp256k1PrivateKey, sender_nonces: &mut HashMap| { + let nonce = sender_nonces.get(&sender_sk.to_hex()).unwrap_or(&0); + let result = *nonce; + sender_nonces.insert(sender_sk.to_hex(), result + 1); + result + }; + + signer_test.boot_to_epoch_3(); + + // Add a delay to the block validation process 
+ TEST_VALIDATE_DELAY_DURATION_SECS.lock().unwrap().replace(3); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + + info!("---- Getting current idle timeout ----"); + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let last_block_hash = get_last_block_hash(); + + let slot_id = 0_u32; + + let get_last_block_hash = || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let block_hash = + hex_bytes(&last_block.get("block_hash").unwrap().as_str().unwrap()[2..]).unwrap(); + Sha512Trunc256Sum::from_vec(&block_hash).unwrap() + }; + + let log_idle_diff = |timestamp: u64| { + let now = get_epoch_time_secs(); + let diff = timestamp.saturating_sub(now); + info!("----- Idle diff: {diff} seconds -----"); + }; + + let initial_response = signer_test.get_latest_block_response(slot_id); + assert_eq!( + initial_response.get_signer_signature_hash(), + last_block_hash + ); + + info!( + "---- Last idle timeout: {} ----", + initial_response.get_tenure_extend_timestamp() + ); + + // Deploy a contract that will be called a lot + + let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + (0..250) + .map(|_| format!("(var-get my-var)")) + .collect::>() + .join(" ") + ); + + // First, lets deploy the contract + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + + info!("----- Submitted deploy txs, mining BTC block -----"); + + signer_test.mine_nakamoto_block(Duration::from_secs(30)); + let mut 
last_response = signer_test.get_latest_block_response(slot_id); + + // Make multiple tenures that get extended through idle timeouts + for t in 1..=tenure_count { + info!("----- Mining tenure {t} -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + // Now, start a tenure with contract calls + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} in tenure {t} -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + for _ in 0..num_txs { + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + // Fill up the mempool with contract calls + let contract_tx = make_contract_call( + &deployer_sk, + deployer_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + match submit_tx_fallible(&http_origin, &contract_tx) { + Ok(_txid) => {} + Err(_e) => { + // If we fail to submit a tx, we need to make sure we don't + // increment the nonce for this sender, so we don't end up + // skipping a tx. 
+ sender_nonces.insert(deployer_sk.to_hex(), deployer_nonce); + } + } + } + }); + let latest_response = signer_test.get_latest_block_response(slot_id); + let naka_blocks = test_observer::get_mined_nakamoto_blocks(); + info!( + "----- Latest tenure extend timestamp: {} -----", + latest_response.get_tenure_extend_timestamp() + ); + log_idle_diff(latest_response.get_tenure_extend_timestamp()); + info!( + "----- Latest block transaction events: {} -----", + naka_blocks.last().unwrap().tx_events.len() + ); + assert_eq!( + latest_response.get_signer_signature_hash(), + get_last_block_hash(), + "Expected the latest block response to be for the latest block" + ); + assert_ne!( + last_response.get_tenure_extend_timestamp(), + latest_response.get_tenure_extend_timestamp(), + "Tenure extend timestamp should change with each block" + ); + last_response = latest_response; + } + + let current_time = get_epoch_time_secs(); + let extend_diff = last_response + .get_tenure_extend_timestamp() + .saturating_sub(current_time); + + info!( + "----- After mining {num_naka_blocks} nakamoto blocks in tenure {t}, waiting for TenureExtend -----"; + "tenure_extend_timestamp" => last_response.get_tenure_extend_timestamp(), + "extend_diff" => extend_diff, + "current_time" => current_time, + ); + + // Now, wait for the idle timeout to trigger + wait_for(extend_diff + 30, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Expected a tenure extend after idle timeout"); + + last_response = signer_test.get_latest_block_response(slot_id); + + info!("----- Tenure {t} extended -----"); + log_idle_diff(last_response.get_tenure_extend_timestamp()); + } + + // After the last extend, mine a few more naka blocks + for i in 1..=num_naka_blocks { + // Just in case these Nakamoto blocks pass the idle timeout (probably because CI is slow), exit early + if i != 1 && last_block_contains_tenure_change_tx(TenureChangeCause::Extended) { + info!("---- Tenure extended 
before mining {i} nakamoto blocks -----"); + break; + } + info!("----- Mining nakamoto block {i} after last tenure extend -----"); + + signer_test.wait_for_nakamoto_block(30, || { + // Throw in a STX transfer to test mixed blocks + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + naka_conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + }); + } + + info!("------------------------- Test Shutdown -------------------------"); + signer_test.shutdown(); +} + #[test] #[ignore] /// This test checks the behaviour of signers when a sortition is empty. Specifically: @@ -2794,7 +3248,7 @@ fn empty_sortition_before_approval() { let stacks_height_before = info.stacks_tip_height; info!("Forcing miner to ignore signatures for next block"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Pausing block commits to trigger an empty sortition."); signer_test @@ -2847,7 +3301,7 @@ fn empty_sortition_before_approval() { .replace(false); info!("Stop ignoring signers and wait for the tip to advance"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); wait_for(60, || { let info = get_chain_info(&signer_test.running_nodes.conf); @@ -5608,7 +6062,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // broadcasted to the miner so it can end its tenure before block confirmation obtained // Clear the stackerdb chunks info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + TEST_IGNORE_SIGNERS.set(true); info!("Delaying signer block N+1 broadcasting to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); @@ -5735,7 +6189,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Timed out waiting for block 
proposal of N+1' block proposal"); info!("Allowing miner to accept block responses again. "); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + TEST_IGNORE_SIGNERS.set(false); info!("Allowing signers to broadcast block N+1 to the miner"); TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(false); @@ -7054,6 +7508,7 @@ fn block_validation_response_timeout() { first_proposal_burn_block_timing: Duration::from_secs(0), tenure_last_block_proposal_timeout: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(100), + tenure_idle_timeout: Duration::from_secs(300), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(),