diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 138913ee6c..63dc0e9a1f 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -99,9 +99,6 @@ pub enum ClientError { /// No reward set exists for the given reward cycle #[error("No reward set exists for reward cycle {0}")] NoRewardSet(u64), - /// Reward set contained corrupted data - #[error("{0}")] - CorruptedRewardSet(String), /// Stacks node does not support a feature we need #[error("Stacks node does not support a required feature: {0}")] UnsupportedStacksFeature(String), @@ -156,7 +153,8 @@ pub(crate) mod tests { use wsts::state_machine::PublicKeys; use super::*; - use crate::config::{GlobalConfig, RegisteredSignersInfo, SignerConfig}; + use crate::config::{GlobalConfig, ParsedSignerEntries, SignerConfig}; + use crate::signer::SignerSlotID; pub struct MockServerClient { pub server: TcpListener, @@ -425,7 +423,7 @@ pub(crate) mod tests { let mut start_key_id = 1u32; let mut end_key_id = start_key_id; let mut signer_public_keys = HashMap::new(); - let mut signer_slot_ids = HashMap::new(); + let mut signer_slot_ids = vec![]; let ecdsa_private_key = config.ecdsa_private_key; let ecdsa_public_key = ecdsa::PublicKey::new(&ecdsa_private_key).expect("Failed to create ecdsa public key"); @@ -459,7 +457,7 @@ pub(crate) mod tests { &StacksPublicKey::from_slice(ecdsa_public_key.to_bytes().as_slice()) .expect("Failed to create stacks public key"), ); - signer_slot_ids.insert(address, signer_id); // Note in a real world situation, these would not always match + signer_slot_ids.push(SignerSlotID(signer_id)); signer_ids.insert(address, signer_id); continue; @@ -486,23 +484,23 @@ pub(crate) mod tests { &StacksPublicKey::from_slice(public_key.to_bytes().as_slice()) .expect("Failed to create stacks public key"), ); - signer_slot_ids.insert(address, signer_id); // Note in a real world situation, these would not always match + signer_slot_ids.push(SignerSlotID(signer_id)); signer_ids.insert(address, signer_id); start_key_id = end_key_id; } SignerConfig { reward_cycle, signer_id: 0, - signer_slot_id: 0, + signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers key_ids: signer_key_ids.get(&0).cloned().unwrap_or_default(), - registered_signers: RegisteredSignersInfo { - signer_slot_ids, + signer_entries: ParsedSignerEntries { public_keys, coordinator_key_ids, signer_key_ids, signer_ids, signer_public_keys, }, + signer_slot_ids, ecdsa_private_key: config.ecdsa_private_key, stacks_private_key: config.stacks_private_key, node_host: config.node_host, diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index affd43ab2a..77f4d679d9 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -33,6 +33,7 @@ use stacks_common::{debug, warn}; use super::ClientError; use crate::client::retry_with_exponential_backoff; use crate::config::SignerConfig; +use crate::signer::SignerSlotID; /// The StackerDB client for communicating with the .signers contract pub struct StackerDB { @@ -42,9 +43,9 @@ pub struct StackerDB { /// The private key used in all stacks node communications stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session - slot_versions: HashMap>, + slot_versions: HashMap>, /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. 
- signer_slot_id: u32, + signer_slot_id: SignerSlotID, /// The reward cycle of the connecting signer reward_cycle: u64, /// The stacker-db transaction msg session for the NEXT reward cycle @@ -69,7 +70,7 @@ impl StackerDB { stacks_private_key: StacksPrivateKey, is_mainnet: bool, reward_cycle: u64, - signer_slot_id: u32, + signer_slot_id: SignerSlotID, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); let stackerdb_issuer = boot_code_addr(is_mainnet); @@ -134,7 +135,7 @@ impl StackerDB { 1 }; - let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message_bytes.clone()); + let mut chunk = StackerDBChunkData::new(slot_id.0, slot_version, message_bytes.clone()); chunk.sign(&self.stacks_private_key)?; let Some(session) = self.signers_message_stackerdb_sessions.get_mut(&msg_id) else { @@ -184,11 +185,11 @@ impl StackerDB { /// Get the transactions from stackerdb for the signers fn get_transactions( transactions_session: &mut StackerDBSession, - signer_ids: &[u32], + signer_ids: &[SignerSlotID], ) -> Result, ClientError> { let send_request = || { transactions_session - .get_latest_chunks(signer_ids) + .get_latest_chunks(&signer_ids.iter().map(|id| id.0).collect::>()) .map_err(backoff::Error::transient) }; let chunk_ack = retry_with_exponential_backoff(send_request)?; @@ -225,25 +226,23 @@ impl StackerDB { Ok(transactions) } - /// Get the latest signer transactions from signer ids for the current reward cycle + /// Get this signer's latest transactions from stackerdb pub fn get_current_transactions_with_retry( &mut self, - signer_id: u32, ) -> Result, ClientError> { - debug!("Signer #{signer_id}: Getting latest transactions from stacker db",); let Some(transactions_session) = self .signers_message_stackerdb_sessions .get_mut(&TRANSACTIONS_MSG_ID) else { return Err(ClientError::NotConnected); }; - Self::get_transactions(transactions_session, &[signer_id]) + Self::get_transactions(transactions_session, &[self.signer_slot_id]) } /// Get the latest signer transactions from signer ids for the next reward cycle pub fn get_next_transactions_with_retry( &mut self, - signer_ids: &[u32], + signer_ids: &[SignerSlotID], ) -> Result, ClientError> { debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); Self::get_transactions(&mut self.next_transaction_session, signer_ids) @@ -255,7 +254,7 @@ impl StackerDB { } /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&mut self) -> u32 { + pub fn get_signer_slot_id(&mut self) -> SignerSlotID { self.signer_slot_id } } @@ -302,8 +301,8 @@ mod tests { let signer_message = SignerMessage::Transactions(vec![tx.clone()]); let message = signer_message.serialize_to_vec(); - let signer_ids = vec![0, 1]; - let h = spawn(move || stackerdb.get_next_transactions_with_retry(&signer_ids)); + let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; + let h = spawn(move || stackerdb.get_next_transactions_with_retry(&signer_slot_ids)); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); response_bytes.extend(message); let mock_server = mock_server_from_config(&config); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 471cec068f..da67e6f448 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -18,7 +18,7 @@ use std::net::SocketAddr; use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::boot::{ - RewardSet, 
SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, + NakamotoSignerEntry, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -34,20 +34,17 @@ use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; -use hashbrown::{HashMap, HashSet}; use serde_json::json; -use slog::{slog_debug, slog_warn}; +use slog::slog_debug; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; +use stacks_common::debug; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; use stacks_common::types::StacksEpochId; -use stacks_common::{debug, warn}; -use wsts::curve::ecdsa; use wsts::curve::point::{Compressed, Point}; -use wsts::state_machine::PublicKeys; use crate::client::{retry_with_exponential_backoff, ClientError}; -use crate::config::{GlobalConfig, RegisteredSignersInfo}; +use crate::config::GlobalConfig; /// The Stacks signer client used to communicate with the stacks node #[derive(Clone, Debug)] @@ -296,8 +293,11 @@ impl StacksClient { Ok(round) } - /// Get the reward set from the stacks node for the given reward cycle - pub fn get_reward_set(&self, reward_cycle: u64) -> Result { + /// Get the reward set signers from the stacks node for the given reward cycle + pub fn get_reward_set_signers( + &self, + reward_cycle: u64, + ) -> Result>, ClientError> { debug!("Getting reward set for reward cycle {reward_cycle}..."); let send_request = || { self.stacks_node_client @@ -310,104 +310,7 @@ impl StacksClient { return Err(ClientError::RequestFailure(response.status())); } let stackers_response = response.json::()?; - Ok(stackers_response.stacker_set) - } - - /// Get the registered signers for a specific reward cycle - /// Returns None if no signers are registered or its not Nakamoto cycle - pub fn get_registered_signers_info( - &self, - reward_cycle: u64, - ) -> Result, ClientError> { - debug!("Getting registered signers for reward cycle {reward_cycle}..."); - let reward_set = self.get_reward_set(reward_cycle)?; - let Some(reward_set_signers) = reward_set.signers else { - warn!("No reward set signers found for reward cycle {reward_cycle}."); - return Ok(None); - }; - if reward_set_signers.is_empty() { - warn!("No registered signers found for reward cycle {reward_cycle}."); - return Ok(None); - } - // signer uses a Vec for its key_ids, but coordinator uses a HashSet for each signer since it needs to do lots of lookups - let mut weight_end = 1; - let mut coordinator_key_ids = HashMap::with_capacity(4000); - let mut signer_key_ids = HashMap::with_capacity(reward_set_signers.len()); - let mut signer_ids = HashMap::with_capacity(reward_set_signers.len()); - let mut public_keys = PublicKeys { - signers: HashMap::with_capacity(reward_set_signers.len()), - key_ids: HashMap::with_capacity(4000), - }; - let mut signer_public_keys = HashMap::with_capacity(reward_set_signers.len()); - for (i, entry) in reward_set_signers.iter().enumerate() { - let signer_id = u32::try_from(i).expect("FATAL: number of signers exceeds u32::MAX"); - let ecdsa_public_key = ecdsa::PublicKey::try_from(entry.signing_key.as_slice()).map_err(|e| { - ClientError::CorruptedRewardSet(format!( - "Reward 
cycle {reward_cycle} failed to convert signing key to ecdsa::PublicKey: {e}" - )) - })?; - let signer_public_key = Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())) - .map_err(|e| { - ClientError::CorruptedRewardSet(format!( - "Reward cycle {reward_cycle} failed to convert signing key to Point: {e}" - )) - })?; - let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()).map_err(|e| { - ClientError::CorruptedRewardSet(format!( - "Reward cycle {reward_cycle} failed to convert signing key to StacksPublicKey: {e}" - )) - })?; - - let stacks_address = StacksAddress::p2pkh(self.mainnet, &stacks_public_key); - - signer_ids.insert(stacks_address, signer_id); - signer_public_keys.insert(signer_id, signer_public_key); - let weight_start = weight_end; - weight_end = weight_start + entry.weight; - for key_id in weight_start..weight_end { - public_keys.key_ids.insert(key_id, ecdsa_public_key); - public_keys.signers.insert(signer_id, ecdsa_public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::with_capacity(entry.weight as usize)) - .insert(key_id); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::with_capacity(entry.weight as usize)) - .push(key_id); - } - } - - let signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, self.mainnet); - // Get the signer writers from the stacker-db to find the signer slot id - let signer_slots_weights = self - .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set) - .unwrap(); - let mut signer_slot_ids = HashMap::with_capacity(signer_slots_weights.len()); - for (index, (address, _)) in signer_slots_weights.into_iter().enumerate() { - signer_slot_ids.insert( - address, - u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), - ); - } - - for address in signer_ids.keys() { - if !signer_slot_ids.contains_key(address) { - debug!("Signer {address} does not have a slot id in the stackerdb"); - return Ok(None); - } - } - - Ok(Some(RegisteredSignersInfo { - public_keys, - signer_key_ids, - signer_ids, - signer_slot_ids, - signer_public_keys, - coordinator_key_ids, - })) + Ok(stackers_response.stacker_set.signers) } /// Retreive the current pox data from the stacks node @@ -687,7 +590,9 @@ mod tests { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::address::PoxAddress; - use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, PoxStartCycleInfo}; + use blockstack_lib::chainstate::stacks::boot::{ + NakamotoSignerEntry, PoxStartCycleInfo, RewardSet, + }; use blockstack_lib::chainstate::stacks::ThresholdSignature; use rand::thread_rng; use rand_core::RngCore; @@ -1232,9 +1137,9 @@ mod tests { let stackers_response_json = serde_json::to_string(&stackers_response) .expect("Failed to serialize get stacker response"); let response = format!("HTTP/1.1 200 OK\n\n{stackers_response_json}"); - let h = spawn(move || mock.client.get_reward_set(0)); + let h = spawn(move || mock.client.get_reward_set_signers(0)); write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), stacker_set); + assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } #[test] diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index bb09d3262d..d8c7b4a8e9 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -32,6 +32,8 @@ use wsts::curve::point::Point; use 
wsts::curve::scalar::Scalar; use wsts::state_machine::PublicKeys; +use crate::signer::SignerSlotID; + const EVENT_TIMEOUT_MS: u64 = 5000; // Default transaction fee in microstacks (if unspecificed in the config file) // TODO: Use the fee estimation endpoint to get the default fee. @@ -110,22 +112,20 @@ impl Network { } } -/// The registered signer information for a specific reward cycle +/// Parsed Reward Set #[derive(Debug, Clone)] -pub struct RegisteredSignersInfo { - /// The signer to key ids mapping for the coordinator - pub coordinator_key_ids: HashMap<u32, HashSet<u32>>, - /// The signer to key ids mapping for the signers - pub signer_key_ids: HashMap<u32, Vec<u32>>, - /// The signer ids to wsts pubilc keys mapping - pub signer_public_keys: HashMap<u32, Point>, - /// The signer addresses mapped to their signer ids +pub struct ParsedSignerEntries { + /// The signer addresses mapped to signer id pub signer_ids: HashMap<StacksAddress, u32>, - /// The signer slot id for a signer address registered in stackerdb - /// This corresponds to their unique index when voting in a reward cycle - pub signer_slot_ids: HashMap<StacksAddress, u32>, - /// The public keys for the reward cycle + /// The signer ids mapped to public key and key ids mapped to public keys pub public_keys: PublicKeys, + /// The signer ids mapped to key ids + pub signer_key_ids: HashMap<u32, Vec<u32>>, + /// The signer ids mapped to wsts public keys + pub signer_public_keys: HashMap<u32, Point>, + /// The signer ids mapped to a hash set of key ids + /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups + pub coordinator_key_ids: HashMap<u32, HashSet<u32>>, } /// The Configuration info needed for an individual signer per reward cycle @@ -133,14 +133,16 @@ pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer + /// The signer ID assigned to this signer to be used in DKG and Sign rounds pub signer_id: u32, - /// The index into the signers list of this signer's key (may be different from signer_id) - pub signer_slot_id: u32, + /// The signer stackerdb slot id (may be different from signer_id) + pub signer_slot_id: SignerSlotID, /// This signer's key ids pub key_ids: Vec<u32>, /// The registered signers for this reward cycle - pub registered_signers: RegisteredSignersInfo, + pub signer_entries: ParsedSignerEntries, + /// The signer slot ids of all signers registered for this reward cycle + pub signer_slot_ids: Vec<SignerSlotID>, /// The Scalar representation of the private key for signer communication pub ecdsa_private_key: Scalar, /// The private key for this signer diff --git a/stacks-signer/src/coordinator.rs b/stacks-signer/src/coordinator.rs index 2c23fd0b32..234d1ade84 100644 --- a/stacks-signer/src/coordinator.rs +++ b/stacks-signer/src/coordinator.rs @@ -174,7 +174,7 @@ mod tests { let number_of_tests = 5; let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let public_keys = generate_signer_config(&config, 10, 4000) - .registered_signers + .signer_entries .public_keys; let mut results = Vec::new(); @@ -197,7 +197,7 @@ ) -> Vec> { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let public_keys = generate_signer_config(&config, 10, 4000) - .registered_signers + .signer_entries .public_keys; let mut results = Vec::new(); let same_hash = generate_random_consensus_hash(); diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 02fb494c6a..c0ec846acd 100644 --- a/stacks-signer/src/runloop.rs +++ 
b/stacks-signer/src/runloop.rs @@ -18,17 +18,21 @@ use std::sync::mpsc::Sender; use std::time::Duration; use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use hashbrown::HashMap; +use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; +use blockstack_lib::util_lib::boot::boot_code_id; +use hashbrown::{HashMap, HashSet}; use libsigner::{SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress, StacksPublicKey}; use stacks_common::{debug, error, info, warn}; +use wsts::curve::ecdsa; +use wsts::curve::point::{Compressed, Point}; use wsts::state_machine::coordinator::State as CoordinatorState; -use wsts::state_machine::OperationResult; +use wsts::state_machine::{OperationResult, PublicKeys}; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; -use crate::config::{GlobalConfig, SignerConfig}; -use crate::signer::{Command as SignerCommand, Signer, State as SignerState}; +use crate::config::{GlobalConfig, ParsedSignerEntries, SignerConfig}; +use crate::signer::{Command as SignerCommand, Signer, SignerSlotID, State as SignerState}; /// Which operation to perform #[derive(PartialEq, Clone, Debug)] @@ -78,27 +82,118 @@ impl From for RunLoop { } impl RunLoop { + /// Parse Nakamoto signer entries into relevant signer information + pub fn parse_nakamoto_signer_entries( + signers: &[NakamotoSignerEntry], + is_mainnet: bool, + ) -> ParsedSignerEntries { + let mut weight_end = 1; + let mut coordinator_key_ids = HashMap::with_capacity(4000); + let mut signer_key_ids = HashMap::with_capacity(signers.len()); + let mut signer_ids = HashMap::with_capacity(signers.len()); + let mut public_keys = PublicKeys { + signers: HashMap::with_capacity(signers.len()), + key_ids: HashMap::with_capacity(4000), + }; + let mut signer_public_keys = HashMap::with_capacity(signers.len()); + for (i, entry) in signers.iter().enumerate() { + // TODO: track these signer ids as non participating if any of the conversions fail + let signer_id = u32::try_from(i).expect("FATAL: number of signers exceeds u32::MAX"); + let ecdsa_public_key = ecdsa::PublicKey::try_from(entry.signing_key.as_slice()) + .expect("FATAL: corrupted signing key"); + let signer_public_key = Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())) + .expect("FATAL: corrupted signing key"); + let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("FATAL: Corrupted signing key"); + + let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); + signer_ids.insert(stacks_address, signer_id); + signer_public_keys.insert(signer_id, signer_public_key); + let weight_start = weight_end; + weight_end = weight_start + entry.weight; + for key_id in weight_start..weight_end { + public_keys.key_ids.insert(key_id, ecdsa_public_key); + public_keys.signers.insert(signer_id, ecdsa_public_key); + coordinator_key_ids + .entry(signer_id) + .or_insert(HashSet::with_capacity(entry.weight as usize)) + .insert(key_id); + signer_key_ids + .entry(signer_id) + .or_insert(Vec::with_capacity(entry.weight as usize)) + .push(key_id); + } + } + ParsedSignerEntries { + signer_ids, + public_keys, + signer_key_ids, + signer_public_keys, + coordinator_key_ids, + } + } + + /// Get the registered signers for a specific reward cycle + /// Returns None if no signers are registered or its not Nakamoto cycle + pub fn 
get_parsed_reward_set( + &self, + reward_cycle: u64, + ) -> Result, ClientError> { + debug!("Getting registered signers for reward cycle {reward_cycle}..."); + let Some(signers) = self.stacks_client.get_reward_set_signers(reward_cycle)? else { + warn!("No reward set signers found for reward cycle {reward_cycle}."); + return Ok(None); + }; + if signers.is_empty() { + warn!("No registered signers found for reward cycle {reward_cycle}."); + return Ok(None); + } + Ok(Some(Self::parse_nakamoto_signer_entries( + &signers, + self.config.network.is_mainnet(), + ))) + } + + /// Get the stackerdb signer slots for a specific reward cycle + pub fn get_parsed_signer_slots( + &self, + stacks_client: &StacksClient, + reward_cycle: u64, + ) -> Result, ClientError> { + let signer_set = + u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); + let signer_stackerdb_contract_id = + boot_code_id(SIGNERS_NAME, self.config.network.is_mainnet()); + // Get the signer writers from the stacker-db to find the signer slot id + let stackerdb_signer_slots = + stacks_client.get_stackerdb_signer_slots(&signer_stackerdb_contract_id, signer_set)?; + let mut signer_slot_ids = HashMap::with_capacity(stackerdb_signer_slots.len()); + for (index, (address, _)) in stackerdb_signer_slots.into_iter().enumerate() { + signer_slot_ids.insert( + address, + SignerSlotID( + u32::try_from(index).expect("FATAL: number of signers exceeds u32::MAX"), + ), + ); + } + Ok(signer_slot_ids) + } /// Get a signer configuration for a specific reward cycle from the stacks node fn get_signer_config(&mut self, reward_cycle: u64) -> Option { // We can only register for a reward cycle if a reward set exists. - let registered_signers = self - .stacks_client - .get_registered_signers_info(reward_cycle).map_err(|e| { - error!( - "Failed to retrieve registered signers info for reward cycle {reward_cycle}: {e}" - ); - e - }).ok()??; - + let signer_entries = self.get_parsed_reward_set(reward_cycle).ok()??; + let signer_slot_ids = self + .get_parsed_signer_slots(&self.stacks_client, reward_cycle) + .ok()?; let current_addr = self.stacks_client.get_signer_address(); - let Some(signer_slot_id) = registered_signers.signer_slot_ids.get(current_addr) else { + let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { warn!( "Signer {current_addr} was not found in stacker db. Must not be registered for this reward cycle {reward_cycle}." ); return None; }; - let Some(signer_id) = registered_signers.signer_ids.get(current_addr) else { + let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else { warn!( "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." ); @@ -107,7 +202,7 @@ impl RunLoop { info!( "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." ); - let key_ids = registered_signers + let key_ids = signer_entries .signer_key_ids .get(signer_id) .cloned() @@ -117,7 +212,8 @@ impl RunLoop { signer_id: *signer_id, signer_slot_id: *signer_slot_id, key_ids, - registered_signers, + signer_entries, + signer_slot_ids: signer_slot_ids.into_values().collect(), ecdsa_private_key: self.config.ecdsa_private_key, stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host, @@ -156,20 +252,13 @@ impl RunLoop { if signer.reward_cycle == prior_reward_cycle { // The signers have been calculated for the next reward cycle. Update the current one debug!("Signer #{}: Next reward cycle ({reward_cycle}) signer set calculated. 
Updating current reward cycle ({prior_reward_cycle}) signer.", signer.signer_id); - signer.next_signers = new_signer_config - .registered_signers + signer.next_signer_addresses = new_signer_config + .signer_entries .signer_ids .keys() .copied() .collect(); - signer.next_signer_ids = new_signer_config - .registered_signers - .signer_ids - .values() - .copied() - .collect(); - signer.next_signer_slot_ids = - new_signer_config.registered_signers.signer_slot_ids.clone(); + signer.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); } } self.stacks_signers @@ -301,3 +390,36 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop { None } } +#[cfg(test)] +mod tests { + use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; + use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; + + use super::RunLoop; + + #[test] + fn parse_nakamoto_signer_entries_test() { + let nmb_signers = 10; + let weight = 10; + let mut signer_entries = Vec::with_capacity(nmb_signers); + for _ in 0..nmb_signers { + let key = StacksPublicKey::from_private(&StacksPrivateKey::new()).to_bytes_compressed(); + let mut signing_key = [0u8; 33]; + signing_key.copy_from_slice(&key); + signer_entries.push(NakamotoSignerEntry { + signing_key, + stacked_amt: 0, + weight, + }); + } + + let parsed_entries = RunLoop::parse_nakamoto_signer_entries(&signer_entries, false); + assert_eq!(parsed_entries.signer_ids.len(), nmb_signers); + let mut signer_ids = parsed_entries.signer_ids.into_values().collect::<Vec<_>>(); + signer_ids.sort(); + assert_eq!( + signer_ids, + (0..nmb_signers).map(|id| id as u32).collect::<Vec<_>>() + ); + } +} diff --git a/stacks-signer/src/signer.rs b/stacks-signer/src/signer.rs index fd80138aa1..59962e5ae5 100644 --- a/stacks-signer/src/signer.rs +++ b/stacks-signer/src/signer.rs @@ -46,6 +46,16 @@ use crate::client::{retry_with_exponential_backoff, ClientError, StackerDB, Stac use crate::config::SignerConfig; use crate::coordinator::CoordinatorSelector; +/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID +#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] +pub struct SignerSlotID(pub u32); + +impl std::fmt::Display for SignerSlotID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + /// Additional Info about a proposed block pub struct BlockInfo { /// The block we are considering @@ -128,18 +138,14 @@ pub struct Signer { pub mainnet: bool, /// The signer id pub signer_id: u32, - /// The other signer ids for this signer's reward cycle - pub signer_ids: Vec<u32>, - /// The addresses of other signers mapped to their signer slot ID - pub signer_slot_ids: HashMap<StacksAddress, u32>, + /// The signer slot ids for the signers in the reward cycle + pub signer_slot_ids: Vec<SignerSlotID>, /// The addresses of other signers - pub signers: Vec<StacksAddress>, - /// The other signer ids for the NEXT reward cycle's signers - pub next_signer_ids: Vec<u32>, - /// The signer addresses mapped to slot ID for the NEXT reward cycle's signers - pub next_signer_slot_ids: HashMap<StacksAddress, u32>, + pub signer_addresses: Vec<StacksAddress>, + /// The signer slot ids for the signers in the NEXT reward cycle + pub next_signer_slot_ids: Vec<SignerSlotID>, /// The addresses of the signers for the NEXT reward cycle - pub next_signers: Vec<StacksAddress>, + pub next_signer_addresses: Vec<StacksAddress>, /// The reward cycle this signer belongs to pub reward_cycle: u64, /// The tx fee in uSTX to use if the epoch is pre Nakamoto (Epoch 3.0) @@ -154,9 +160,9 @@ impl From<SignerConfig> for Signer { fn from(signer_config: SignerConfig) -> Self { let
stackerdb = StackerDB::from(&signer_config); - let num_signers = u32::try_from(signer_config.registered_signers.public_keys.signers.len()) + let num_signers = u32::try_from(signer_config.signer_entries.public_keys.signers.len()) .expect("FATAL: Too many registered signers to fit in a u32"); - let num_keys = u32::try_from(signer_config.registered_signers.public_keys.key_ids.len()) + let num_keys = u32::try_from(signer_config.signer_entries.public_keys.key_ids.len()) .expect("FATAL: Too many key ids to fit in a u32"); let threshold = (num_keys as f64 * 7_f64 / 10_f64).ceil() as u32; let dkg_threshold = (num_keys as f64 * 9_f64 / 10_f64).ceil() as u32; @@ -172,8 +178,8 @@ impl From for Signer { dkg_end_timeout: signer_config.dkg_end_timeout, nonce_timeout: signer_config.nonce_timeout, sign_timeout: signer_config.sign_timeout, - signer_key_ids: signer_config.registered_signers.coordinator_key_ids, - signer_public_keys: signer_config.registered_signers.signer_public_keys, + signer_key_ids: signer_config.signer_entries.coordinator_key_ids, + signer_public_keys: signer_config.signer_entries.signer_public_keys, }; let coordinator = FireCoordinator::new(coordinator_config); @@ -184,10 +190,10 @@ impl From for Signer { signer_config.signer_id, signer_config.key_ids, signer_config.ecdsa_private_key, - signer_config.registered_signers.public_keys.clone(), + signer_config.signer_entries.public_keys.clone(), ); let coordinator_selector = - CoordinatorSelector::from(signer_config.registered_signers.public_keys); + CoordinatorSelector::from(signer_config.signer_entries.public_keys); debug!( "Signer #{}: initial coordinator is signer {}", @@ -204,22 +210,14 @@ impl From for Signer { stackerdb, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, - signer_ids: signer_config - .registered_signers - .signer_ids - .values() - .copied() - .collect(), - signer_slot_ids: signer_config.registered_signers.signer_slot_ids, - signers: signer_config - .registered_signers + signer_addresses: signer_config + .signer_entries .signer_ids - .keys() - .copied() + .into_keys() .collect(), - next_signer_ids: vec![], - next_signer_slot_ids: HashMap::new(), - next_signers: vec![], + signer_slot_ids: signer_config.signer_slot_ids.clone(), + next_signer_slot_ids: vec![], + next_signer_addresses: vec![], reward_cycle: signer_config.reward_cycle, tx_fee_ustx: signer_config.tx_fee_ustx, coordinator_selector, @@ -714,7 +712,7 @@ impl Signer { ) -> Result, ClientError> { let transactions: Vec<_> = self .stackerdb - .get_current_transactions_with_retry(self.signer_id)? + .get_current_transactions_with_retry()? .into_iter() .filter_map(|tx| { if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { @@ -731,7 +729,7 @@ impl Signer { &mut self, stacks_client: &StacksClient, ) -> Result, ClientError> { - if self.next_signer_ids.is_empty() { + if self.next_signer_slot_ids.is_empty() { debug!( "Signer #{}: No next signers. 
Skipping transaction retrieval.", self.signer_id @@ -739,10 +737,10 @@ impl Signer { return Ok(vec![]); } // Get all the account nonces for the next signers - let account_nonces = self.get_account_nonces(stacks_client, &self.next_signers); + let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); let transactions: Vec<_> = self .stackerdb - .get_next_transactions_with_retry(&self.next_signer_ids)?; + .get_next_transactions_with_retry(&self.next_signer_slot_ids)?; let mut filtered_transactions = std::collections::HashMap::new(); NakamotoSigners::update_filtered_transactions( &mut filtered_transactions, @@ -874,7 +872,7 @@ impl Signer { // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance let signer_address = stacks_client.get_signer_address(); // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about - let account_nonces = self.get_account_nonces(stacks_client, &self.signers); + let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); let signer_transactions = retry_with_exponential_backoff(|| { self.get_signer_transactions(&account_nonces) @@ -893,7 +891,7 @@ impl Signer { .map(|tx| tx.get_origin_nonce().wrapping_add(1)) .unwrap_or(*account_nonce); match stacks_client.build_vote_for_aggregate_public_key( - self.stackerdb.get_signer_slot_id(), + self.stackerdb.get_signer_slot_id().0, self.coordinator.current_dkg_id, *dkg_public_key, self.reward_cycle, diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 3fd265d798..c0c2e72e2b 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -42,7 +42,7 @@ use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::client::{StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::RunLoopCommand; -use stacks_signer::signer::Command as SignerCommand; +use stacks_signer::signer::{Command as SignerCommand, SignerSlotID}; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; use wsts::common::Signature; @@ -531,7 +531,7 @@ impl SignerTest { .unwrap() } - fn get_signer_index(&self, reward_cycle: u64) -> u32 { + fn get_signer_index(&self, reward_cycle: u64) -> SignerSlotID { let valid_signer_set = u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); @@ -541,7 +541,9 @@ impl SignerTest { .expect("FATAL: failed to get signer slots from stackerdb") .iter() .position(|(address, _)| address == self.stacks_client.get_signer_address()) - .map(|pos| u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + .map(|pos| { + SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) + }) .expect("FATAL: signer not registered") }
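
Note on the refactor above: the following is a minimal, self-contained Rust sketch (not part of the patch) of the two ideas it relies on — the SignerSlotID newtype that keeps .signers StackerDB slot indices distinct from reward-set signer IDs, and the weight-to-key-ID expansion performed in parse_nakamoto_signer_entries. The Address alias and the parse_signer_slots / key_ids_from_weights helpers here are illustrative stand-ins, not items from the patch.

    use std::collections::HashMap;

    /// Mirrors stacks_signer::signer::SignerSlotID: an index into the .signers
    /// StackerDB slot list, deliberately a distinct type from the u32 signer ID.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
    pub struct SignerSlotID(pub u32);

    /// Placeholder for a signer's Stacks address (the real code uses StacksAddress).
    type Address = String;

    /// Assign slot IDs by enumeration order of the StackerDB slot entries,
    /// the same way get_parsed_signer_slots derives them in the run loop.
    fn parse_signer_slots(slot_entries: &[(Address, u32)]) -> HashMap<Address, SignerSlotID> {
        slot_entries
            .iter()
            .enumerate()
            .map(|(index, (address, _num_slots))| {
                let id = u32::try_from(index).expect("number of signers exceeds u32::MAX");
                (address.clone(), SignerSlotID(id))
            })
            .collect()
    }

    /// Expand per-signer weights into contiguous, 1-based WSTS key ID ranges,
    /// matching the weight_start..weight_end loop in parse_nakamoto_signer_entries.
    fn key_ids_from_weights(weights: &[u32]) -> Vec<Vec<u32>> {
        let mut next_key_id = 1u32;
        weights
            .iter()
            .map(|weight| {
                let ids: Vec<u32> = (next_key_id..next_key_id + weight).collect();
                next_key_id += weight;
                ids
            })
            .collect()
    }

    fn main() {
        let slots = vec![("SP_SIGNER_A".to_string(), 1), ("SP_SIGNER_B".to_string(), 1)];
        for (address, slot_id) in parse_signer_slots(&slots) {
            // The inner u32 (slot_id.0) is only unwrapped at the StackerDB API boundary,
            // e.g. StackerDBChunkData::new(slot_id.0, ...) in the stackerdb.rs hunk above.
            println!("{address} -> slot {}", slot_id.0);
        }
        // A signer with weight 3 followed by one with weight 2 gets key IDs [1,2,3] and [4,5].
        assert_eq!(key_ids_from_weights(&[3, 2]), vec![vec![1, 2, 3], vec![4, 5]]);
    }

Wrapping the slot index in a newtype turns any accidental use of a signer ID where a StackerDB slot index is expected into a compile error; the inner u32 is only unwrapped at the StackerDB boundary, as in the StackerDBChunkData::new and build_vote_for_aggregate_public_key call sites in the hunks above.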