diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 9af044ce..61c44888 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -28,7 +28,7 @@ jobs:
         run: dprint check
       - name: cargo fmt
-        run: cargo fmt --all -- --check
+        run: cargo +nightly fmt --all -- --check
       - name: Run ShellCheck
         uses: ludeeus/action-shellcheck@master
@@ -51,7 +51,7 @@ jobs:
           components: clippy
       - name: cargo clippy
-        run: cargo clippy --locked --workspace --examples --tests --all-features -- -D warnings
+        run: cargo clippy --locked --workspace --examples --tests --all-features -- -D warnings -D clippy::unwrap_used -D clippy::expect_used -D clippy::nursery -D clippy::pedantic -A clippy::module_name_repetitions

   cargo-deny:
     runs-on: self-hosted
diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 00000000..af0959f0
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,24 @@
+# Basic
+edition = "2021"
+# hard_tabs = true
+max_width = 100
+use_small_heuristics = "Max"
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+# Consistency
+newline_style = "Unix"
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = true
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = true
+use_field_init_shorthand = true
+# Format comments
+comment_width = 100
+wrap_comments = true
diff --git a/Cargo.lock b/Cargo.lock
index 19ffa060..a3956901 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4954,6 +4954,7 @@ dependencies = [
  "sp-keyring",
  "subxt",
  "tokio",
+ "tracing",
 ]

 [[package]]
diff --git a/chains/astar/config/src/lib.rs b/chains/astar/config/src/lib.rs
index b2c711f0..9f775f0d 100644
--- a/chains/astar/config/src/lib.rs
+++ b/chains/astar/config/src/lib.rs
@@ -12,6 +12,10 @@ pub mod metadata {
     pub mod dev {}
 }

+/// Retrieve the [`BlockchainConfig`] from the provided `network`
+///
+/// # Errors
+/// Returns `Err` if the network is not supported
 pub fn config(network: &str) -> Result {
     // All available networks are listed here:
     // https://github.com/AstarNetwork/Astar/blob/v5.15.0/bin/collator/src/command.rs#L88-L100
diff --git a/chains/astar/server/src/lib.rs b/chains/astar/server/src/lib.rs
index daa0cb5b..127444fa 100644
--- a/chains/astar/server/src/lib.rs
+++ b/chains/astar/server/src/lib.rs
@@ -40,20 +40,25 @@ pub struct AstarClient {
 }

 impl AstarClient {
+    /// Creates a new Astar client, loading the config from `network` and connecting to `url`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `url` is unreachable.
     pub async fn new(network: &str, url: &str) -> Result {
         let config = rosetta_config_astar::config(network)?;
         Self::from_config(config, url).await
     }

+    /// Creates a new Astar client using the provided `config` and connecting to `url`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `url` is unreachable.
pub async fn from_config(config: BlockchainConfig, url: &str) -> Result { let client = default_client(url, None).await?; let substrate_client = OnlineClient::::from_rpc_client(Arc::new(client.clone())).await?; let ethereum_client = MaybeWsEthereumClient::from_jsonrpsee(config, client).await?; - Ok(Self { - client: ethereum_client, - ws_client: substrate_client, - }) + Ok(Self { client: ethereum_client, ws_client: substrate_client }) } async fn account_info( @@ -80,30 +85,28 @@ impl AstarClient { .ok_or_else(|| anyhow::anyhow!("no block hash found"))? }; - let account_info = self - .ws_client - .storage() - .at(block_hash) - .fetch(&storage_query) - .await?; - - if let Some(account_info) = account_info { - >>::decode(&mut account_info.encoded()) - .map_err(|_| anyhow::anyhow!("invalid format")) - } else { - Ok(AccountInfo::> { - nonce: 0, - consumers: 0, - providers: 0, - sufficients: 0, - data: AccountData { - free: 0, - reserved: 0, - frozen: 0, - flags: astar_metadata::runtime_types::pallet_balances::types::ExtraFlags(0), - }, - }) - } + let account_info = self.ws_client.storage().at(block_hash).fetch(&storage_query).await?; + + account_info.map_or_else( + || { + Ok(AccountInfo::> { + nonce: 0, + consumers: 0, + providers: 0, + sufficients: 0, + data: AccountData { + free: 0, + reserved: 0, + frozen: 0, + flags: astar_metadata::runtime_types::pallet_balances::types::ExtraFlags(0), + }, + }) + }, + |account_info| { + >>::decode(&mut account_info.encoded()) + .map_err(|_| anyhow::anyhow!("invalid format")) + }, + ) } } @@ -138,7 +141,7 @@ impl BlockchainClient for AstarClient { AddressFormat::Ss58(_) => { let account_info = self.account_info(address, Some(block)).await?; account_info.data.free - } + }, AddressFormat::Eip55 => { // Frontier `eth_getBalance` returns the reducible_balance instead the free balance: // https://github.com/paritytech/frontier/blob/polkadot-v0.9.43/frame/evm/src/lib.rs#L853-L855 @@ -148,10 +151,8 @@ impl BlockchainClient for AstarClient { .map_err(|err| anyhow::anyhow!("{}", err))?; let account_info = self.account_info(&address, Some(block)).await?; account_info.data.free - } - _ => { - return Err(anyhow::anyhow!("invalid address format")); - } + }, + AddressFormat::Bech32(_) => return Err(anyhow::anyhow!("invalid address format")), }; Ok(balance) } @@ -192,9 +193,7 @@ impl BlockchainClient for AstarClient { public_key: &PublicKey, options: &Self::MetadataParams, ) -> Result { - Ok(AstarMetadata( - self.client.metadata(public_key, &options.0).await?, - )) + Ok(AstarMetadata(self.client.metadata(public_key, &options.0).await?)) } async fn submit(&self, transaction: &[u8]) -> Result> { @@ -225,12 +224,10 @@ impl BlockchainClient for AstarClient { #[cfg(test)] mod tests { use super::*; - use ethers_solc::artifacts::Source; - use ethers_solc::{CompilerInput, EvmVersion, Solc}; + use ethers_solc::{artifacts::Source, CompilerInput, EvmVersion, Solc}; use rosetta_docker::Env; use sha3::Digest; - use std::collections::BTreeMap; - use std::path::Path; + use std::{collections::BTreeMap, path::Path}; pub async fn client_from_config(config: BlockchainConfig) -> Result { let url = config.node_uri.to_string(); @@ -301,13 +298,9 @@ mod tests { let tx_hash = wallet.eth_deploy_contract(bytes).await?; let receipt = wallet.eth_transaction_receipt(&tx_hash).await?; - let contract_address = receipt - .get("contractAddress") - .and_then(Value::as_str) - .unwrap(); - let tx_hash = wallet - .eth_send_call(contract_address, "function emitEvent()", &[], 0) - .await?; + let contract_address 
= receipt.get("contractAddress").and_then(Value::as_str).unwrap();
+        let tx_hash =
+            wallet.eth_send_call(contract_address, "function emitEvent()", &[], 0).await?;
         let receipt = wallet.eth_transaction_receipt(&tx_hash).await?;
         let logs = receipt.get("logs").and_then(Value::as_array).unwrap();
         assert_eq!(logs.len(), 1);
diff --git a/chains/bitcoin/config/src/lib.rs b/chains/bitcoin/config/src/lib.rs
index 58c0dbc4..c6763705 100644
--- a/chains/bitcoin/config/src/lib.rs
+++ b/chains/bitcoin/config/src/lib.rs
@@ -1,9 +1,14 @@
 use anyhow::Result;
-use rosetta_core::crypto::address::AddressFormat;
-use rosetta_core::crypto::Algorithm;
-use rosetta_core::{BlockchainConfig, NodeUri};
+use rosetta_core::{
+    crypto::{address::AddressFormat, Algorithm},
+    BlockchainConfig, NodeUri,
+};
 use std::sync::Arc;

+/// Retrieve the [`BlockchainConfig`] from the provided `network`
+///
+/// # Errors
+/// Returns `Err` if the network is not supported
 pub fn config(network: &str) -> Result {
     let (network, symbol, bip44_id) = match network {
         "regtest" => ("regtest", "tBTC", 1),
diff --git a/chains/bitcoin/server/src/lib.rs b/chains/bitcoin/server/src/lib.rs
index 6a7674fe..59d73564 100644
--- a/chains/bitcoin/server/src/lib.rs
+++ b/chains/bitcoin/server/src/lib.rs
@@ -1,6 +1,5 @@
 use anyhow::{Context, Result};
-use bitcoincore_rpc_async::bitcoin::BlockHash;
-use bitcoincore_rpc_async::{Auth, Client, RpcApi};
+use bitcoincore_rpc_async::{bitcoin::BlockHash, Auth, Client, RpcApi};
 use rosetta_core::{
     crypto::{address::Address, PublicKey},
     types::{
@@ -22,35 +21,35 @@ pub struct BitcoinClient {
 }

 impl BitcoinClient {
+    /// Creates a new bitcoin client from `network` and `addr`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable.
     pub async fn new(network: &str, addr: &str) -> Result {
         let config = rosetta_config_bitcoin::config(network)?;
         Self::from_config(config, addr).await
     }

+    /// Creates a new bitcoin client from `config` and `addr`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable.
     pub async fn from_config(config: BlockchainConfig, addr: &str) -> Result {
-        let client = Client::new(
-            addr.to_string(),
-            Auth::UserPass("rosetta".into(), "rosetta".into()),
-        )
-        .await?;
+        let client =
+            Client::new(addr.to_string(), Auth::UserPass("rosetta".into(), "rosetta".into()))
+                .await?;
         let genesis = client.get_block_hash(0).await?;
-        let genesis_block = BlockIdentifier {
-            index: 0,
-            hash: genesis.to_string(),
-        };
+        let genesis_block = BlockIdentifier { index: 0, hash: genesis.to_string() };

-        Ok(Self {
-            config,
-            client,
-            genesis_block,
-        })
+        Ok(Self { config, client, genesis_block })
     }
 }

 /// Bitcoin community has adopted 6 blocks as a standard confirmation period.
-/// That is, once a transaction is included in a block in the blockchain which is followed up by at least 6 additional blocks
-/// the transaction is called “confirmed.” While this was chosen somewhat arbitrarily, it is a reasonably safe value in practice
-/// as the only time this would have left users vulnerable to double-spending was the atypical March 2013 fork.
+/// That is, once a transaction is included in a block in the blockchain which is followed up by at +/// least 6 additional blocks the transaction is called “confirmed.” While this was chosen somewhat +/// arbitrarily, it is a reasonably safe value in practice as the only time this would have left +/// users vulnerable to double-spending was the atypical March 2013 fork. const CONFIRMATION_PERIOD: u64 = 6; #[async_trait::async_trait] @@ -79,23 +78,13 @@ impl BlockchainClient for BitcoinClient { async fn current_block(&self) -> Result { let hash = self.client.get_best_block_hash().await?; let info = self.client.get_block_info(&hash).await?; - Ok(BlockIdentifier { - index: info.height as u64, - hash: hash.to_string(), - }) + Ok(BlockIdentifier { index: info.height as u64, hash: hash.to_string() }) } async fn finalized_block(&self) -> Result { - let index = self - .client - .get_block_count() - .await? - .saturating_sub(CONFIRMATION_PERIOD); + let index = self.client.get_block_count().await?.saturating_sub(CONFIRMATION_PERIOD); let hash = self.client.get_block_hash(index).await?; - Ok(BlockIdentifier { - index, - hash: hash.to_string(), - }) + Ok(BlockIdentifier { index, hash: hash.to_string() }) } async fn balance(&self, _address: &Address, _block: &BlockIdentifier) -> Result { @@ -127,28 +116,24 @@ impl BlockchainClient for BitcoinClient { (Some(block_hash), _) => { let hash = BlockHash::from_str(block_hash).context("Invalid block hash")?; self.client.get_block(&hash).await? - } + }, (None, Some(height)) => { - let block_bash = self - .client - .get_block_hash(height) - .await - .context("cannot find by index")?; + let block_bash = + self.client.get_block_hash(height).await.context("cannot find by index")?; self.client.get_block(&block_bash).await? 
- } + }, (None, None) => anyhow::bail!("the block hash or index must be specified"), }; - let block_height = match block.bip34_block_height().ok() { - Some(height) => height, - None => { - let info = self - .client - .get_block_info(&block.block_hash()) - .await - .context("Cannot find block height")?; - info.height as u64 - } + let block_height = if let Ok(height) = block.bip34_block_height() { + height + } else { + let info = self + .client + .get_block_info(&block.block_hash()) + .await + .context("Cannot find block height")?; + info.height as u64 }; let transactions = block @@ -171,7 +156,7 @@ impl BlockchainClient for BitcoinClient { index: block_height.saturating_sub(1), hash: block.header.prev_blockhash.to_string(), }, - timestamp: (block.header.time as i64) * 1000, + timestamp: i64::from(block.header.time) * 1000, transactions, metadata: None, }) diff --git a/chains/ethereum/config/src/lib.rs b/chains/ethereum/config/src/lib.rs index 1e341f4c..cd623e73 100644 --- a/chains/ethereum/config/src/lib.rs +++ b/chains/ethereum/config/src/lib.rs @@ -1,11 +1,16 @@ use anyhow::Result; use rosetta_config_astar::config as astar_config; -use rosetta_core::crypto::address::AddressFormat; -use rosetta_core::crypto::Algorithm; -use rosetta_core::{BlockchainConfig, NodeUri}; +use rosetta_core::{ + crypto::{address::AddressFormat, Algorithm}, + BlockchainConfig, NodeUri, +}; use serde::{Deserialize, Serialize}; use std::sync::Arc; +/// Retrieve the [`BlockchainConfig`] from the provided polygon `network` +/// +/// # Errors +/// Returns `Err` if the network is not supported pub fn polygon_config(network: &str) -> Result { let (network, bip44_id, is_dev) = match network { "dev" => ("dev", 1, true), @@ -17,6 +22,10 @@ pub fn polygon_config(network: &str) -> Result { Ok(evm_config("polygon", network, "MATIC", bip44_id, is_dev)) } +/// Retrieve the [`BlockchainConfig`] from the provided ethereum `network` +/// +/// # Errors +/// Returns `Err` if the network is not supported pub fn config(network: &str) -> Result { let (network, symbol, bip44_id, is_dev) = match network { "dev" => ("dev", "ETH", 1, true), @@ -54,15 +63,14 @@ fn evm_config( currency_unit: "wei", currency_symbol: symbol, currency_decimals: 18, - node_uri: NodeUri::parse("ws://127.0.0.1:8545/ws").expect("uri is valid; qed"), + node_uri: { + #[allow(clippy::expect_used)] + NodeUri::parse("ws://127.0.0.1:8545/ws").expect("uri is valid; qed") + }, node_image: "ethereum/client-go:v1.12.2", node_command: Arc::new(|network, port| { let mut params = if network == "dev" { - vec![ - "--dev".into(), - "--dev.period=1".into(), - "--ipcdisable".into(), - ] + vec!["--dev".into(), "--dev.period=1".into(), "--ipcdisable".into()] } else { vec!["--syncmode=full".into()] }; diff --git a/chains/ethereum/rpc-client/src/client.rs b/chains/ethereum/rpc-client/src/client.rs index c013a4cd..9f50ac89 100644 --- a/chains/ethereum/rpc-client/src/client.rs +++ b/chains/ethereum/rpc-client/src/client.rs @@ -2,10 +2,11 @@ use crate::{error::EthError, extension::impl_client_trait, params::EthRpcParams} use async_trait::async_trait; use ethers::providers::JsonRpcClient; use jsonrpsee::core::client::ClientT; -use serde::de::DeserializeOwned; -use serde::Serialize; -use std::fmt::{Debug, Formatter}; -use std::ops::{Deref, DerefMut}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fmt::{Debug, Formatter}, + ops::{Deref, DerefMut}, +}; /// Adapter for [`jsonrpsee::core::client::ClientT`] to [`ethers::providers::JsonRpcClient`]. 
#[repr(transparent)] @@ -17,7 +18,7 @@ impl EthClientAdapter where C: ClientT + Debug + Send + Sync, { - pub fn new(client: C) -> Self { + pub const fn new(client: C) -> Self { Self { client } } } @@ -35,9 +36,7 @@ where C: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ClientAdapter") - .field("client", &self.client) - .finish() + f.debug_struct("ClientAdapter").field("client", &self.client).finish() } } @@ -46,9 +45,7 @@ where C: Clone, { fn clone(&self) -> Self { - Self { - client: self.client.clone(), - } + Self { client: self.client.clone() } } } diff --git a/chains/ethereum/rpc-client/src/error.rs b/chains/ethereum/rpc-client/src/error.rs index f5c1b209..03755101 100644 --- a/chains/ethereum/rpc-client/src/error.rs +++ b/chains/ethereum/rpc-client/src/error.rs @@ -9,10 +9,7 @@ use jsonrpsee::{client_transport::ws::WsHandshakeError, core::Error as JsonRpsee pub enum EthError { /// Thrown if the response could not be parsed #[error("{original}")] - JsonRpsee { - original: JsonRpseeError, - message: Option, - }, + JsonRpsee { original: JsonRpseeError, message: Option }, /// Failed to parse the data. #[allow(clippy::enum_variant_names)] @@ -36,33 +33,24 @@ impl From for EthError { fn from(error: JsonRpseeError) -> Self { match error { JsonRpseeError::Call(call) => { - let code = call.code() as i64; - let data = call - .data() - .and_then(|raw_value| serde_json::value::to_value(raw_value).ok()); + let code = i64::from(call.code()); + let data = + call.data().and_then(|raw_value| serde_json::value::to_value(raw_value).ok()); let message = call.message().to_string(); Self::JsonRpsee { original: JsonRpseeError::Call(call), - message: Some(EthJsonRpcError { - code, - message, - data, - }), + message: Some(EthJsonRpcError { code, message, data }), } - } + }, JsonRpseeError::ParseError(serde_error) => Self::ParseError(serde_error), JsonRpseeError::RestartNeeded(reason) => Self::RestartNeeded(reason), error => { let message = format!("{}", &error); Self::JsonRpsee { original: error, - message: Some(EthJsonRpcError { - code: 9999, - message, - data: None, - }), + message: Some(EthJsonRpcError { code: 9999, message, data: None }), } - } + }, } } } diff --git a/chains/ethereum/rpc-client/src/params.rs b/chains/ethereum/rpc-client/src/params.rs index 4870ec36..60319c32 100644 --- a/chains/ethereum/rpc-client/src/params.rs +++ b/chains/ethereum/rpc-client/src/params.rs @@ -1,6 +1,5 @@ use jsonrpsee::core::{error::Error as JsonRpseeError, traits::ToRpcParams}; -use serde::de::DeserializeOwned; -use serde::Serialize; +use serde::{de::DeserializeOwned, Serialize}; use serde_json::value::RawValue; use std::fmt::{Display, Formatter}; diff --git a/chains/ethereum/rpc-client/src/pubsub.rs b/chains/ethereum/rpc-client/src/pubsub.rs index 38b74c3b..fe504c2b 100644 --- a/chains/ethereum/rpc-client/src/pubsub.rs +++ b/chains/ethereum/rpc-client/src/pubsub.rs @@ -1,15 +1,17 @@ -use crate::client::EthClientAdapter; -use crate::prelude::ToRpcParams; -use crate::subscription::EthSubscription; use crate::{ + client::EthClientAdapter, error::EthError, extension::{impl_client_trait, impl_subscription_trait}, params::EthRpcParams, + prelude::ToRpcParams, + subscription::EthSubscription, }; use async_trait::async_trait; use dashmap::DashMap; -use ethers::providers::{JsonRpcClient, PubsubClient}; -use ethers::types::U256; +use ethers::{ + providers::{JsonRpcClient, PubsubClient}, + types::U256, +}; use jsonrpsee::{ core::{ client::{ClientT, Subscription, SubscriptionClientT, 
SubscriptionKind}, @@ -17,19 +19,21 @@ use jsonrpsee::{ }, types::SubscriptionId, }; -use serde::de::DeserializeOwned; -use serde::Serialize; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicBool, Ordering}; +use serde::{de::DeserializeOwned, Serialize}; use std::{ fmt::{Debug, Formatter}, - sync::Arc, + ops::{Deref, DerefMut}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, }; const ETHEREUM_SUBSCRIBE_METHOD: &str = "eth_subscribe"; const ETHEREUM_UNSUBSCRIBE_METHOD: &str = "eth_unsubscribe"; -/// Adapter for [`jsonrpsee::core::client::SubscriptionClientT`] to [`ethers::providers::PubsubClient`]. +/// Adapter for [`jsonrpsee::core::client::SubscriptionClientT`] to +/// [`ethers::providers::PubsubClient`]. pub struct EthPubsubAdapter { pub(crate) adapter: EthClientAdapter, pub(crate) eth_subscriptions: Arc>, @@ -61,10 +65,7 @@ where C: Clone, { fn clone(&self) -> Self { - Self { - adapter: self.adapter.clone(), - eth_subscriptions: self.eth_subscriptions.clone(), - } + Self { adapter: self.adapter.clone(), eth_subscriptions: self.eth_subscriptions.clone() } } } @@ -72,13 +73,13 @@ impl Deref for EthPubsubAdapter { type Target = C; fn deref(&self) -> &Self::Target { - self.adapter.deref() + &self.adapter } } impl DerefMut for EthPubsubAdapter { fn deref_mut(&mut self) -> &mut Self::Target { - self.adapter.deref_mut() + &mut self.adapter } } @@ -87,12 +88,15 @@ where C: SubscriptionClientT + Debug + Send + Sync, { pub fn new(client: C) -> Self { - Self { - adapter: EthClientAdapter::new(client), - eth_subscriptions: Arc::new(DashMap::new()), - } + Self { adapter: EthClientAdapter::new(client), eth_subscriptions: Arc::new(DashMap::new()) } } + /// # Errors + /// + /// Will return `Err` when: + /// * when `R` is not an `U256` + /// * when the client RPC fails to send the message + /// * when the client returns an invalid subscription id pub async fn eth_subscribe(&self, params: P) -> Result where R: DeserializeOwned + Send, @@ -115,11 +119,8 @@ where .ok() .and_then(|value| serde_json::from_value::(value).ok()); - let id = if let Some(id) = maybe_subscription_id { - id - } else { - // id is not a valid U256, convert str to bytes - match id { + let id = maybe_subscription_id.map_or_else( + || match id { SubscriptionId::Num(id) => U256::from(*id), SubscriptionId::Str(id) => { let str_bytes = id.as_bytes(); @@ -127,11 +128,12 @@ where let size = usize::min(str_bytes.len(), bytes.len()); bytes[0..size].copy_from_slice(str_bytes); U256::from_big_endian(bytes.as_slice()) - } - } - }; + }, + }, + |id| id, + ); Some(id) - } + }, _ => None, } .and_then(|subscription_id| { @@ -156,6 +158,9 @@ where Ok(result) } + /// # Errors + /// + /// Will return `Err(EthError)` when the client fails to unsubscribe pub async fn eth_unsubscribe(&self, params: EthRpcParams) -> Result where R: DeserializeOwned + Send, @@ -194,9 +199,7 @@ where match method { ETHEREUM_SUBSCRIBE_METHOD => self.eth_subscribe(params).await, ETHEREUM_UNSUBSCRIBE_METHOD => self.eth_unsubscribe(params).await, - _ => ClientT::request(&self.adapter, method, params) - .await - .map_err(EthError::from), + _ => ClientT::request(&self.adapter, method, params).await.map_err(EthError::from), } } } @@ -224,9 +227,9 @@ where } /// Remove a subscription from this transport - fn unsubscribe>(&self, _id: T) -> Result<(), Self::Error> { + fn unsubscribe>(&self, id: T) -> Result<(), Self::Error> { self.eth_subscriptions - .remove(&_id.into()) + .remove(&id.into()) .map(|_| ()) .ok_or_else(|| EthError::JsonRpsee { original: 
JsonRpseeError::InvalidSubscriptionId, @@ -236,7 +239,7 @@ where } #[derive(Debug)] -pub(crate) enum SubscriptionState { +pub enum SubscriptionState { Pending(Subscription), Subscribed(Arc), Unsubscribed, @@ -244,30 +247,30 @@ pub(crate) enum SubscriptionState { impl SubscriptionState { fn subscribe(&mut self, id: U256) -> Option { - let old_state = std::mem::replace(self, SubscriptionState::Unsubscribed); + let old_state = std::mem::replace(self, Self::Unsubscribed); match old_state { - SubscriptionState::Pending(stream) => { + Self::Pending(stream) => { let unsubscribe = Arc::new(AtomicBool::new(false)); - *self = SubscriptionState::Subscribed(unsubscribe.clone()); + *self = Self::Subscribed(unsubscribe.clone()); Some(EthSubscription::new(id, stream, unsubscribe)) - } - SubscriptionState::Subscribed(unsubscribe) => { - *self = SubscriptionState::Subscribed(unsubscribe); + }, + Self::Subscribed(unsubscribe) => { + *self = Self::Subscribed(unsubscribe); None - } - SubscriptionState::Unsubscribed => None, + }, + Self::Unsubscribed => None, } } async fn unsubscribe(&mut self) -> Result<(), JsonRpseeError> { - let old_state = std::mem::replace(self, SubscriptionState::Unsubscribed); + let old_state = std::mem::replace(self, Self::Unsubscribed); match old_state { - SubscriptionState::Pending(stream) => stream.unsubscribe().await, - SubscriptionState::Subscribed(unsubscribe) => { + Self::Pending(stream) => stream.unsubscribe().await, + Self::Subscribed(unsubscribe) => { unsubscribe.store(true, Ordering::SeqCst); Ok(()) - } - SubscriptionState::Unsubscribed => Ok(()), + }, + Self::Unsubscribed => Ok(()), } } } diff --git a/chains/ethereum/rpc-client/src/subscription.rs b/chains/ethereum/rpc-client/src/subscription.rs index ed61e8a7..aa0fb0a3 100644 --- a/chains/ethereum/rpc-client/src/subscription.rs +++ b/chains/ethereum/rpc-client/src/subscription.rs @@ -1,14 +1,16 @@ use ethers::types::U256; -use futures_util::future::BoxFuture; -use futures_util::{FutureExt, Stream, StreamExt}; -use jsonrpsee::core::client::Subscription; -use jsonrpsee::core::error::Error as JsonRpseeError; +use futures_util::{future::BoxFuture, FutureExt, Stream, StreamExt}; +use jsonrpsee::core::{client::Subscription, error::Error as JsonRpseeError}; use pin_project::pin_project; use serde_json::value::RawValue; -use std::pin::Pin; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::task::{Context, Poll}; +use std::{ + pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; /// Max number of failures in sequence before unsubscribing /// A Failure occur when the publisher submits an invalid json @@ -21,7 +23,8 @@ enum EthSubscriptionState { Unsubscribing(BoxFuture<'static, Result<(), JsonRpseeError>>), } -/// Adapter for [`jsonrpsee::core::client::Subscription`] to EventStream from [`ethers::providers::PubsubClient`]. +/// Adapter for [`jsonrpsee::core::client::Subscription`] to EventStream from +/// [`ethers::providers::PubsubClient`]. 
#[pin_project(project = SubscriptionStreamProj)] pub struct EthSubscription { id: U256, @@ -62,21 +65,20 @@ impl Stream for EthSubscription { Some(EthSubscriptionState::Idle(stream)) => { if this.should_unsubscribe.load(Ordering::SeqCst) { tracing::info!("unsubscribing..."); - *this.state = Some(EthSubscriptionState::Unsubscribing( - stream.unsubscribe().boxed(), - )); + *this.state = + Some(EthSubscriptionState::Unsubscribing(stream.unsubscribe().boxed())); } else { *this.state = Some(EthSubscriptionState::Receiving(stream)); } continue; - } + }, Some(EthSubscriptionState::Receiving(mut stream)) => { let result = match stream.poll_next_unpin(cx) { Poll::Ready(result) => result, Poll::Pending => { *this.state = Some(EthSubscriptionState::Receiving(stream)); return Poll::Pending; - } + }, }; // Stream is close, no unsubscribe needed @@ -94,7 +96,7 @@ impl Stream for EthSubscription { Ok(value) => { *this.state = Some(EthSubscriptionState::Idle(stream)); return Poll::Ready(Some(value)); - } + }, Err(error) => { *this.failure_count += 1; this.span.record("failures", *this.failure_count); @@ -119,26 +121,26 @@ impl Stream for EthSubscription { *this.state = Some(EthSubscriptionState::Idle(stream)); } continue; - } + }, } - } + }, Some(EthSubscriptionState::Unsubscribing(mut future)) => { return match future.poll_unpin(cx) { Poll::Ready(Ok(_)) => Poll::Ready(None), Poll::Ready(Err(error)) => { tracing::error!("Failed to unsubscribe: {:?}", error); Poll::Ready(None) - } + }, Poll::Pending => { *this.state = Some(EthSubscriptionState::Unsubscribing(future)); Poll::Pending - } - }; - } + }, + } + }, None => { tracing::error!("stream must not be polled after being closed`"); return Poll::Ready(None); - } + }, } } } diff --git a/chains/ethereum/server/src/client.rs b/chains/ethereum/server/src/client.rs index bf98d205..f8c0d850 100644 --- a/chains/ethereum/server/src/client.rs +++ b/chains/ethereum/server/src/client.rs @@ -1,11 +1,12 @@ use crate::{event_stream::EthereumEventStream, proof::verify_proof}; use anyhow::{bail, Context, Result}; use ethabi::token::{LenientTokenizer, Tokenizer}; -use ethers::abi::{Detokenize, HumanReadableParser, InvalidOutputType, Token}; -use ethers::prelude::*; -use ethers::providers::{JsonRpcClient, Middleware, Provider}; -use ethers::utils::keccak256; -use ethers::utils::rlp::Encodable; +use ethers::{ + abi::{Detokenize, HumanReadableParser, InvalidOutputType, Token}, + prelude::*, + providers::{JsonRpcClient, Middleware, Provider}, + utils::{keccak256, rlp::Encodable}, +}; use rosetta_config_ethereum::{EthereumMetadata, EthereumMetadataParams}; use rosetta_core::{ crypto::{address::Address, PublicKey}, @@ -16,8 +17,17 @@ use rosetta_core::{ BlockchainConfig, }; use serde_json::{json, Value}; -use std::str::FromStr; -use std::sync::Arc; +use std::{str::FromStr, sync::Arc}; + +struct Detokenizer { + tokens: Vec, +} + +impl Detokenize for Detokenizer { + fn from_tokens(tokens: Vec) -> Result { + Ok(Self { tokens }) + } +} pub struct EthereumClient
<P>
{ config: BlockchainConfig, @@ -41,26 +51,20 @@ where { pub async fn new(config: BlockchainConfig, rpc_client: P) -> Result { let client = Arc::new(Provider::new(rpc_client)); - let genesis = client - .get_block(0) - .await? - .context("Failed to get genesis block")?; - let genesis_block = BlockIdentifier { - index: 0, - hash: hex::encode(genesis.hash.as_ref().unwrap()), + let Some(genesis_hash) = + client.get_block(0).await?.context("Failed to get genesis block")?.hash + else { + anyhow::bail!("FATAL: genesis block doesn't have hash"); }; - Ok(Self { - config, - client, - genesis_block, - }) + let genesis_block = BlockIdentifier { index: 0, hash: hex::encode(genesis_hash) }; + Ok(Self { config, client, genesis_block }) } - pub fn config(&self) -> &BlockchainConfig { + pub const fn config(&self) -> &BlockchainConfig { &self.config } - pub fn genesis_block(&self) -> &BlockIdentifier { + pub const fn genesis_block(&self) -> &BlockIdentifier { &self.genesis_block } @@ -70,24 +74,18 @@ where pub async fn current_block(&self) -> Result { let index = self.client.get_block_number().await?.as_u64(); - let block = self - .client - .get_block(index) - .await? - .context("missing block")?; - Ok(BlockIdentifier { - index, - hash: hex::encode(block.hash.as_ref().unwrap()), - }) + let Some(block_hash) = self.client.get_block(index).await?.context("missing block")?.hash + else { + anyhow::bail!("FATAL: block hash is missing"); + }; + Ok(BlockIdentifier { index, hash: hex::encode(block_hash) }) } pub async fn finalized_block(&self) -> Result { // TODO: ISSUE-176 Create a new connector for polygon let block = if self.config.blockchain == "polygon" { - let Some(latest_block) = self - .client - .get_block(BlockId::Number(BlockNumber::Latest)) - .await? + let Some(latest_block) = + self.client.get_block(BlockId::Number(BlockNumber::Latest)).await? else { return Ok(self.genesis_block.clone()); }; @@ -114,10 +112,8 @@ where }; finalized_block - } else if let Some(finalized_block) = self - .client - .get_block(BlockId::Number(BlockNumber::Finalized)) - .await? + } else if let Some(finalized_block) = + self.client.get_block(BlockId::Number(BlockNumber::Finalized)).await? { finalized_block } else { @@ -126,7 +122,7 @@ where Ok(BlockIdentifier { index: block.number.context("Block is pending")?.as_u64(), - hash: hex::encode(block.hash.as_ref().unwrap()), + hash: hex::encode(block.hash.context("Block is pending")?), }) } @@ -142,6 +138,7 @@ where .as_u128()) } + #[allow(clippy::unused_async)] pub async fn coins(&self, _address: &Address, _block: &BlockIdentifier) -> Result> { anyhow::bail!("not a utxo chain"); } @@ -150,17 +147,14 @@ where // first account will be the coinbase account on a dev net let coinbase = self.client.get_accounts().await?[0]; let address: H160 = address.address().parse()?; - let tx = TransactionRequest::new() - .to(address) - .value(param) - .from(coinbase); + let tx = TransactionRequest::new().to(address).value(param).from(coinbase); Ok(self .client .send_transaction(tx, None) .await? .confirmations(2) .await? - .unwrap() + .context("failed to retrieve tx receipt")? 
.transaction_hash .0 .to_vec()) @@ -171,10 +165,7 @@ where public_key: &PublicKey, options: &EthereumMetadataParams, ) -> Result { - let from: H160 = public_key - .to_address(self.config().address_format) - .address() - .parse()?; + let from: H160 = public_key.to_address(self.config().address_format).address().parse()?; let to: Option = if options.destination.len() >= 20 { Some(H160::from_slice(&options.destination).into()) } else { @@ -219,11 +210,9 @@ where let block_id = if let Some(hash) = block_identifier.hash.as_ref() { BlockId::Hash(H256::from_str(hash)?) } else { - let index = if let Some(index) = block_identifier.index { - BlockNumber::Number(U64::from(index)) - } else { - BlockNumber::Latest - }; + let index = block_identifier + .index + .map_or(BlockNumber::Latest, |index| BlockNumber::Number(U64::from(index))); BlockId::Number(index) }; let block = self @@ -231,10 +220,7 @@ where .get_block_with_txs(block_id) .await .map_err(|error| { - anyhow::anyhow!( - "Failed to get block with transactions: {}", - error.to_string() - ) + anyhow::anyhow!("Failed to get block with transactions: {}", error.to_string()) })? .context("block not found")?; let block_number = block.number.context("Unable to fetch block number")?; @@ -244,9 +230,13 @@ where crate::utils::block_reward_transaction(&self.client, self.config(), &block).await?; transactions.push(block_reward_transaction); for transaction in &block.transactions { - let transaction = - crate::utils::get_transaction(&self.client, self.config(), &block, transaction) - .await?; + let transaction = crate::utils::get_transaction( + &self.client, + self.config(), + block.clone(), + transaction, + ) + .await?; transactions.push(transaction); } Ok(Block { @@ -258,7 +248,7 @@ where index: block_number.as_u64().saturating_sub(1), hash: hex::encode(block.parent_hash), }, - timestamp: block.timestamp.as_u64() as i64, + timestamp: i64::try_from(block.timestamp.as_u64()).context("timestamp overflow")?, transactions, metadata: None, }) @@ -275,17 +265,14 @@ where .get_block(BlockId::Hash(H256::from_str(&block.hash)?)) .await? .context("block not found")?; - let transaction = self - .client - .get_transaction(tx_id) - .await? 
- .context("transaction not found")?; let transaction = - crate::utils::get_transaction(&self.client, self.config(), &block, &transaction) - .await?; + self.client.get_transaction(tx_id).await?.context("transaction not found")?; + let transaction = + crate::utils::get_transaction(&self.client, self.config(), block, &transaction).await?; Ok(transaction) } + #[allow(clippy::too_many_lines)] pub async fn call(&self, req: &CallRequest) -> Result { let call_details = req.method.split('-').collect::>(); if call_details.len() != 3 { @@ -302,9 +289,7 @@ where if let Some(block_hash) = block_identifier.hash.as_ref() { return BlockId::from_str(block_hash).map_err(|e| anyhow::anyhow!("{e}")); } else if let Some(block_number) = block_identifier.index { - return Ok(BlockId::Number(BlockNumber::Number(U64::from( - block_number, - )))); + return Ok(BlockId::Number(BlockNumber::Number(U64::from(block_number)))); }; bail!("invalid block identifier") }) @@ -333,14 +318,6 @@ where let tx = &tx.into(); let received_data = self.client.call(tx, block_id).await?; - struct Detokenizer { - tokens: Vec, - } - impl Detokenize for Detokenizer { - fn from_tokens(tokens: Vec) -> Result { - Ok(Self { tokens }) - } - } let detokenizer: Detokenizer = decode_function_data(&function, received_data, false)?; let mut result = Vec::with_capacity(tokens.len()); @@ -348,40 +325,36 @@ where result.push(token.to_string()); } Ok(serde_json::to_value(result)?) - } + }, "storage" => { //process storage call let from = H160::from_str(contract_address)?; let location = H256::from_str(method_or_position)?; - // TODO: remove the params["block_number"], use block_identifier instead, leaving it here for compatibility + // TODO: remove the params["block_number"], use block_identifier instead, leaving it + // here for compatibility let block_num = params["block_number"] .as_u64() .map(|block_num| BlockId::Number(block_num.into())) .or(block_id); - let storage_check = self - .client - .get_storage_at(from, location, block_num) - .await?; + let storage_check = self.client.get_storage_at(from, location, block_num).await?; Ok(Value::String(format!("{storage_check:#?}",))) - } + }, "storage_proof" => { let from = H160::from_str(contract_address)?; let location = H256::from_str(method_or_position)?; - // TODO: remove the params["block_number"], use block_identifier instead, leaving it here for compatibility + // TODO: remove the params["block_number"], use block_identifier instead, leaving it + // here for compatibility let block_num = params["block_number"] .as_u64() .map(|block_num| BlockId::Number(block_num.into())) .or(block_id); - let proof_data = self - .client - .get_proof(from, vec![location], block_num) - .await?; + let proof_data = self.client.get_proof(from, vec![location], block_num).await?; //process verfiicatin of proof let storage_hash = proof_data.storage_hash; @@ -404,7 +377,7 @@ where "proof": result, "isValid": is_valid })) - } + }, "transaction_receipt" => { let tx_hash = H256::from_str(contract_address)?; let receipt = self.client.get_transaction_receipt(tx_hash).await?; @@ -413,10 +386,10 @@ where bail!("block identifier is ignored for transaction receipt"); } Ok(result) - } + }, _ => { bail!("request type not supported") - } + }, } } } @@ -427,9 +400,6 @@ where { pub async fn listen(&self) -> Result> { let new_head_subscription = self.client.subscribe_blocks().await?; - Ok(EthereumEventStream::new( - Arc::clone(&self.client), - new_head_subscription, - )) + Ok(EthereumEventStream::new(Arc::clone(&self.client), 
new_head_subscription)) } } diff --git a/chains/ethereum/server/src/eth_types.rs b/chains/ethereum/server/src/eth_types.rs index a4d1976b..4b9ab251 100644 --- a/chains/ethereum/server/src/eth_types.rs +++ b/chains/ethereum/server/src/eth_types.rs @@ -22,24 +22,20 @@ pub const MAX_UNCLE_DEPTH: u64 = 8; pub const _TRANSFER_GAS_LIMIT: u64 = 21000; -pub const FRONTIER_BLOCK_REWARD: u64 = 5000000000000000000; -pub const BYZANTIUM_BLOCK_REWARD: u64 = 3000000000000000000; -pub const CONSTANTINOPLE_BLOCK_REWARD: u64 = 2000000000000000000; +pub const FRONTIER_BLOCK_REWARD: u64 = 5_000_000_000_000_000_000; +pub const BYZANTIUM_BLOCK_REWARD: u64 = 3_000_000_000_000_000_000; +pub const CONSTANTINOPLE_BLOCK_REWARD: u64 = 2_000_000_000_000_000_000; pub struct ChainConfig { pub byzantium_block: u64, pub constantinople_block: u64, } -pub const _MAINNET_CHAIN_CONFIG: ChainConfig = ChainConfig { - byzantium_block: 4370000, - constantinople_block: 7280000, -}; +pub const _MAINNET_CHAIN_CONFIG: ChainConfig = + ChainConfig { byzantium_block: 4_370_000, constantinople_block: 7_280_000 }; -pub const TESTNET_CHAIN_CONFIG: ChainConfig = ChainConfig { - byzantium_block: 0, - constantinople_block: 0, -}; +pub const TESTNET_CHAIN_CONFIG: ChainConfig = + ChainConfig { byzantium_block: 0, constantinople_block: 0 }; #[derive(Deserialize, Serialize, Clone, Debug, Eq, PartialEq)] pub struct Trace { diff --git a/chains/ethereum/server/src/event_stream.rs b/chains/ethereum/server/src/event_stream.rs index 926dbf26..3c28d711 100644 --- a/chains/ethereum/server/src/event_stream.rs +++ b/chains/ethereum/server/src/event_stream.rs @@ -1,11 +1,7 @@ -use ethers::prelude::*; -use ethers::providers::PubsubClient; -use futures_util::future::BoxFuture; -use futures_util::FutureExt; +use ethers::{prelude::*, providers::PubsubClient}; +use futures_util::{future::BoxFuture, FutureExt}; use rosetta_core::{stream::Stream, types::BlockIdentifier, BlockOrIdentifier, ClientEvent}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::Poll; +use std::{pin::Pin, sync::Arc, task::Poll}; // Maximum number of failures in sequence before closing the stream const FAILURE_THRESHOLD: u32 = 10; @@ -19,8 +15,8 @@ pub struct EthereumEventStream<'a, P: PubsubClient> { pub finalized_block_failures: u32, /// Count the number of failed attempts to retrieve the latest block pub latest_block_failures: u32, - /// Cache the best finalized block, we use this to avoid emitting two [`ClientEvent::NewFinalized`] - /// for the same block + /// Cache the best finalized block, we use this to avoid emitting two + /// [`ClientEvent::NewFinalized`] for the same block pub best_finalized_block: Option, /// Ethereum client doesn't support subscribing for finalized blocks, as workaround /// everytime we receive a new head, we query the latest finalized block @@ -49,24 +45,15 @@ where fn finalized_block(&self) -> BoxFuture<'static, Result>, ProviderError>> { // Clone client to make BoxFuture 'static let client = Arc::clone(&self.client); - async move { - client - .get_block(BlockId::Number(BlockNumber::Finalized)) - .await - } - .boxed() + async move { client.get_block(BlockId::Number(BlockNumber::Finalized)).await }.boxed() } } /// Converts [`Block`] to [`BlockIdentifier`] -fn block_to_identifier(block: Block) -> Result { - let Some(number) = block.number else { - return Err("block number is missing"); - }; +fn block_to_identifier(block: &Block) -> Result { + let Some(number) = block.number else { return Err("block number is missing") }; - let Some(hash) = block.hash else 
{
-        return Err("block hash is missing");
-    };
+    let Some(hash) = block.hash else { return Err("block hash is missing") };

     Ok(BlockIdentifier::new(number.as_u64(), hex::encode(hash)))
 }
@@ -97,13 +84,13 @@ where
                 match finalized_block_future.poll_unpin(cx) {
                     Poll::Ready(Ok(Some(block))) => {
                         // Convert raw block to block identifier
-                        let block_identifier = match block_to_identifier(block) {
+                        let block_identifier = match block_to_identifier(&block) {
                             Ok(block_identifier) => block_identifier,
                             Err(error) => {
                                 this.finalized_block_failures += 1;
                                 tracing::error!("finalized block: {error}");
                                 break;
-                            }
+                            },
                         };

                         // Reset failure counter
@@ -125,25 +112,25 @@ where
                         return Poll::Ready(Some(ClientEvent::NewFinalized(
                             BlockOrIdentifier::Identifier(block_identifier),
                         )));
-                    }
+                    },
                     Poll::Ready(Ok(None)) => {
                         // Retry to retrieve the latest finalized block.
                         this.finalized_block_future = Some(this.finalized_block());
                         tracing::error!("finalized block not found");
                         this.finalized_block_failures += 1;
                         continue;
-                    }
+                    },
                     Poll::Ready(Err(error)) => {
                         // Retry to retrieve the latest finalized block.
                         this.finalized_block_future = Some(this.finalized_block());
                         tracing::error!("failed to retrieve finalized block: {error:?}");
                         this.finalized_block_failures += 1;
                         continue;
-                    }
+                    },
                     Poll::Pending => {
                         this.finalized_block_future = Some(finalized_block_future);
                         break;
-                    }
+                    },
                 }
             }
         }
@@ -159,13 +146,13 @@ where
             match this.new_head.poll_next_unpin(cx) {
                 Poll::Ready(Some(block)) => {
                     // Convert raw block to block identifier
-                    let block_identifier = match block_to_identifier(block) {
+                    let block_identifier = match block_to_identifier(&block) {
                         Ok(block_identifier) => block_identifier,
                         Err(error) => {
                             this.latest_block_failures += 1;
                             tracing::error!("latest block: {error}");
                             continue;
-                        }
+                        },
                    };

                    // Reset failure counter
@@ -179,7 +166,7 @@ where
                     return Poll::Ready(Some(ClientEvent::NewHead(BlockOrIdentifier::Identifier(
                         block_identifier,
                     ))));
-                }
+                },
                 Poll::Ready(None) => return Poll::Ready(None),
                 Poll::Pending => return Poll::Pending,
             };
diff --git a/chains/ethereum/server/src/lib.rs b/chains/ethereum/server/src/lib.rs
index 73af2759..2f8eba05 100644
--- a/chains/ethereum/server/src/lib.rs
+++ b/chains/ethereum/server/src/lib.rs
@@ -31,7 +31,16 @@ pub enum MaybeWsEthereumClient {
 }

 impl MaybeWsEthereumClient {
-    pub async fn new<S: AsRef<str>>(blockchain: &str, network: &str, addr: S) -> Result {
+    /// Creates a new Ethereum client from `network` and `addr`.
+    /// Supported blockchains are `ethereum` and `polygon`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable.
+    pub async fn new<S: AsRef<str> + Send>(
+        blockchain: &str,
+        network: &str,
+        addr: S,
+    ) -> Result {
         let config = match blockchain {
             "polygon" => rosetta_config_ethereum::polygon_config(network)?,
             "ethereum" => rosetta_config_ethereum::config(network)?,
@@ -40,7 +49,14 @@ impl MaybeWsEthereumClient {
         Self::from_config(config, addr).await
     }

-    pub async fn from_config<S: AsRef<str>>(config: BlockchainConfig, addr: S) -> Result {
+    /// Creates a new Ethereum client from `config` and `addr`
+    ///
+    /// # Errors
+    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable.
+ pub async fn from_config + Send>( + config: BlockchainConfig, + addr: S, + ) -> Result { let uri = Url::parse(addr.as_ref())?; if uri.scheme() == "ws" || uri.scheme() == "wss" { let client = default_client(uri.as_str(), None).await?; @@ -52,6 +68,11 @@ impl MaybeWsEthereumClient { } } + /// Creates a new Ethereum Client from the provided `JsonRpsee` client, + /// this method is useful for reusing the same rpc client for ethereum and substrate calls. + /// + /// # Errors + /// Will return `Err` when the network is invalid, or when the provided `addr` is unreacheable. pub async fn from_jsonrpsee(config: BlockchainConfig, client: DefaultClient) -> Result { let ws_connection = EthPubsubAdapter::new(client); let client = EthereumClient::new(config, ws_connection).await?; @@ -67,57 +88,57 @@ impl BlockchainClient for MaybeWsEthereumClient { fn config(&self) -> &BlockchainConfig { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.config(), - MaybeWsEthereumClient::Ws(ws_client) => ws_client.config(), + Self::Http(http_client) => http_client.config(), + Self::Ws(ws_client) => ws_client.config(), } } fn genesis_block(&self) -> &BlockIdentifier { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.genesis_block(), - MaybeWsEthereumClient::Ws(ws_client) => ws_client.genesis_block(), + Self::Http(http_client) => http_client.genesis_block(), + Self::Ws(ws_client) => ws_client.genesis_block(), } } async fn node_version(&self) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.node_version().await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.node_version().await, + Self::Http(http_client) => http_client.node_version().await, + Self::Ws(ws_client) => ws_client.node_version().await, } } async fn current_block(&self) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.current_block().await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.current_block().await, + Self::Http(http_client) => http_client.current_block().await, + Self::Ws(ws_client) => ws_client.current_block().await, } } async fn finalized_block(&self) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.finalized_block().await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.finalized_block().await, + Self::Http(http_client) => http_client.finalized_block().await, + Self::Ws(ws_client) => ws_client.finalized_block().await, } } async fn balance(&self, address: &Address, block: &BlockIdentifier) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.balance(address, block).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.balance(address, block).await, + Self::Http(http_client) => http_client.balance(address, block).await, + Self::Ws(ws_client) => ws_client.balance(address, block).await, } } async fn coins(&self, address: &Address, block: &BlockIdentifier) -> Result> { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.coins(address, block).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.coins(address, block).await, + Self::Http(http_client) => http_client.coins(address, block).await, + Self::Ws(ws_client) => ws_client.coins(address, block).await, } } async fn faucet(&self, address: &Address, param: u128) -> Result> { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.faucet(address, param).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.faucet(address, param).await, + Self::Http(http_client) => 
http_client.faucet(address, param).await, + Self::Ws(ws_client) => ws_client.faucet(address, param).await, } } @@ -127,24 +148,22 @@ impl BlockchainClient for MaybeWsEthereumClient { options: &Self::MetadataParams, ) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => { - http_client.metadata(public_key, options).await - } - MaybeWsEthereumClient::Ws(ws_client) => ws_client.metadata(public_key, options).await, + Self::Http(http_client) => http_client.metadata(public_key, options).await, + Self::Ws(ws_client) => ws_client.metadata(public_key, options).await, } } async fn submit(&self, transaction: &[u8]) -> Result> { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.submit(transaction).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.submit(transaction).await, + Self::Http(http_client) => http_client.submit(transaction).await, + Self::Ws(ws_client) => ws_client.submit(transaction).await, } } async fn block(&self, block_identifier: &PartialBlockIdentifier) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.block(block_identifier).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.block(block_identifier).await, + Self::Http(http_client) => http_client.block(block_identifier).await, + Self::Ws(ws_client) => ws_client.block(block_identifier).await, } } @@ -154,27 +173,25 @@ impl BlockchainClient for MaybeWsEthereumClient { tx: &TransactionIdentifier, ) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => { - http_client.block_transaction(block, tx).await - } - MaybeWsEthereumClient::Ws(ws_client) => ws_client.block_transaction(block, tx).await, + Self::Http(http_client) => http_client.block_transaction(block, tx).await, + Self::Ws(ws_client) => ws_client.block_transaction(block, tx).await, } } async fn call(&self, req: &CallRequest) -> Result { match self { - MaybeWsEthereumClient::Http(http_client) => http_client.call(req).await, - MaybeWsEthereumClient::Ws(ws_client) => ws_client.call(req).await, + Self::Http(http_client) => http_client.call(req).await, + Self::Ws(ws_client) => ws_client.call(req).await, } } async fn listen<'a>(&'a self) -> Result>> { match self { - MaybeWsEthereumClient::Http(_) => Ok(None), - MaybeWsEthereumClient::Ws(ws_client) => { + Self::Http(_) => Ok(None), + Self::Ws(ws_client) => { let subscription = ws_client.listen().await?; Ok(Some(subscription)) - } + }, } } } @@ -182,12 +199,10 @@ impl BlockchainClient for MaybeWsEthereumClient { #[cfg(test)] mod tests { use super::*; - use ethers_solc::artifacts::Source; - use ethers_solc::{CompilerInput, EvmVersion, Solc}; + use ethers_solc::{artifacts::Source, CompilerInput, EvmVersion, Solc}; use rosetta_docker::Env; use sha3::Digest; - use std::collections::BTreeMap; - use std::path::Path; + use std::{collections::BTreeMap, path::Path}; pub async fn client_from_config(config: BlockchainConfig) -> Result { let url = config.node_uri.to_string(); @@ -250,12 +265,7 @@ mod tests { async fn test_smart_contract() -> Result<()> { let config = rosetta_config_ethereum::config("dev")?; - let env = Env::new( - "ethereum-smart-contract", - config.clone(), - client_from_config, - ) - .await?; + let env = Env::new("ethereum-smart-contract", config.clone(), client_from_config).await?; let faucet = 100 * u128::pow(10, config.currency_decimals); let wallet = env.ephemeral_wallet().await?; @@ -272,15 +282,12 @@ mod tests { let tx_hash = wallet.eth_deploy_contract(bytes).await?; let receipt = 
wallet.eth_transaction_receipt(&tx_hash).await?; - let contract_address = receipt - .get("contractAddress") - .and_then(|v| v.as_str()) - .unwrap(); - let tx_hash = wallet - .eth_send_call(contract_address, "function emitEvent()", &[], 0) - .await?; + let contract_address = + receipt.get("contractAddress").and_then(serde_json::Value::as_str).unwrap(); + let tx_hash = + wallet.eth_send_call(contract_address, "function emitEvent()", &[], 0).await?; let receipt = wallet.eth_transaction_receipt(&tx_hash).await?; - let logs = receipt.get("logs").and_then(|v| v.as_array()).unwrap(); + let logs = receipt.get("logs").and_then(serde_json::Value::as_array).unwrap(); assert_eq!(logs.len(), 1); let topic = logs[0]["topics"][0].as_str().unwrap(); let expected = format!("0x{}", hex::encode(sha3::Keccak256::digest("AnEvent()"))); @@ -293,12 +300,8 @@ mod tests { async fn test_smart_contract_view() -> Result<()> { let config = rosetta_config_ethereum::config("dev")?; - let env = Env::new( - "ethereum-smart-contract-view", - config.clone(), - client_from_config, - ) - .await?; + let env = + Env::new("ethereum-smart-contract-view", config.clone(), client_from_config).await?; let faucet = 100 * u128::pow(10, config.currency_decimals); let wallet = env.ephemeral_wallet().await?; diff --git a/chains/ethereum/server/src/proof.rs b/chains/ethereum/server/src/proof.rs index d111a540..c8150d44 100644 --- a/chains/ethereum/server/src/proof.rs +++ b/chains/ethereum/server/src/proof.rs @@ -1,6 +1,10 @@ -use ethers::types::{Bytes, EIP1186ProofResponse}; -use ethers::utils::keccak256; -use ethers::utils::rlp::{decode_list, RlpStream}; +use ethers::{ + types::{Bytes, EIP1186ProofResponse}, + utils::{ + keccak256, + rlp::{decode_list, RlpStream}, + }, +}; pub fn verify_proof(proof: &Vec, root: &[u8], path: &Vec, value: &Vec) -> bool { let mut expected_hash = root.to_vec(); @@ -14,25 +18,22 @@ pub fn verify_proof(proof: &Vec, root: &[u8], path: &Vec, value: &Vec let node_list: Vec> = decode_list(node); if node_list.len() == 17 { + // exclusion proof + let nibble = get_nibble(path, path_offset); if i == proof.len() - 1 { - // exclusion proof - let nibble = get_nibble(path, path_offset); let node = &node_list[nibble as usize]; - if node.is_empty() && is_empty_value(value) { return true; } } else { - let nibble = get_nibble(path, path_offset); expected_hash = node_list[nibble as usize].clone(); - path_offset += 1; } } else if node_list.len() == 2 { if i == proof.len() - 1 { // exclusion proof - if !paths_match(&node_list[0], skip_length(&node_list[0]), path, path_offset) - && is_empty_value(value) + if !paths_match(&node_list[0], skip_length(&node_list[0]), path, path_offset) && + is_empty_value(value) { return true; } @@ -95,6 +96,7 @@ fn get_rest_path(p: &Vec, s: usize) -> String { ret } +#[allow(clippy::unwrap_used)] fn is_empty_value(value: &Vec) -> bool { let mut stream = RlpStream::new(); stream.begin_list(4); @@ -114,10 +116,7 @@ fn is_empty_value(value: &Vec) -> bool { fn shared_prefix_length(path: &Vec, path_offset: usize, node_path: &Vec) -> usize { let skip_length = skip_length(node_path); - let len = std::cmp::min( - node_path.len() * 2 - skip_length, - path.len() * 2 - path_offset, - ); + let len = std::cmp::min(node_path.len() * 2 - skip_length, path.len() * 2 - path_offset); let mut prefix_len = 0; for i in 0..len { @@ -141,15 +140,13 @@ fn skip_length(node: &Vec) -> usize { let nibble = get_nibble(node, 0); match nibble { - 0 => 2, - 1 => 1, - 2 => 2, - 3 => 1, + 0 | 2 => 2, + 1 | 3 => 1, _ => 0, } } -fn 
get_nibble(path: &[u8], offset: usize) -> u8 { +const fn get_nibble(path: &[u8], offset: usize) -> u8 { let byte = path[offset / 2]; if offset % 2 == 0 { byte >> 4 diff --git a/chains/ethereum/server/src/utils.rs b/chains/ethereum/server/src/utils.rs index b837317c..bffdab74 100644 --- a/chains/ethereum/server/src/utils.rs +++ b/chains/ethereum/server/src/utils.rs @@ -5,58 +5,68 @@ use crate::eth_types::{ SUCCESS_STATUS, TESTNET_CHAIN_CONFIG, UNCLE_REWARD_MULTIPLIER, UNCLE_REWARD_OP_TYPE, }; use anyhow::{bail, Context, Result}; -use ethers::{prelude::*, utils::to_checksum}; use ethers::{ + prelude::*, providers::Middleware, types::{Block, Transaction, TransactionReceipt, H160, H256, U256, U64}, + utils::to_checksum, }; -use rosetta_core::types as rosetta_types; -use rosetta_core::types::{ - AccountIdentifier, Amount, Currency, Operation, OperationIdentifier, TransactionIdentifier, +use rosetta_core::{ + types as rosetta_types, + types::{ + AccountIdentifier, Amount, Currency, Operation, OperationIdentifier, TransactionIdentifier, + }, + BlockchainConfig, }; -use rosetta_core::BlockchainConfig; use serde_json::json; -use std::collections::{HashMap, VecDeque}; -use std::str::FromStr; +use std::{ + collections::{HashMap, VecDeque}, + str::FromStr, +}; -pub async fn get_transaction( +pub async fn get_transaction( client: &Provider
<P>
, config: &BlockchainConfig, - block: &Block, + block: Block, tx: &Transaction, ) -> Result { + let Some(block_hash) = block.hash else { + anyhow::bail!("Block must have a hash"); + }; + let Some(block_number) = block.number else { + anyhow::bail!("Block must have a number"); + }; + let tx_receipt = client .get_transaction_receipt(tx.hash) .await? .context("Transaction receipt not found")?; - if tx_receipt - .block_hash - .context("Block hash not found in tx receipt")? - != block.hash.unwrap() - { + if tx_receipt.block_hash.context("Block hash not found in tx receipt")? != block_hash { bail!("Transaction receipt block hash does not match block hash"); } let currency = config.currency(); let mut operations = vec![]; - let fee_ops = get_fee_operations(block, tx, &tx_receipt, ¤cy)?; + let fee_ops = get_fee_operations(&block, tx, &tx_receipt, ¤cy)?; operations.extend(fee_ops); - let tx_trace = if block.number.unwrap().as_u64() != 0 { + let tx_trace = if block_number.is_zero() { + None + } else { let trace = get_transaction_trace(&tx.hash, client).await?; - let trace_ops = get_trace_operations(trace.clone(), operations.len() as i64, ¤cy)?; + let trace_ops = get_trace_operations( + trace.clone(), + i64::try_from(operations.len()).context("operations overflow")?, + ¤cy, + )?; operations.extend(trace_ops); Some(trace) - } else { - None }; Ok(rosetta_types::Transaction { - transaction_identifier: TransactionIdentifier { - hash: hex::encode(tx.hash), - }, + transaction_identifier: TransactionIdentifier { hash: hex::encode(tx.hash) }, operations, related_transactions: None, metadata: Some(json!({ @@ -76,17 +86,12 @@ fn get_fee_operations( ) -> Result> { let miner = block.author.context("block has no author")?; let base_fee = block.base_fee_per_gas.context("block has no base fee")?; - let tx_type = tx - .transaction_type - .context("transaction type unavailable")?; + let tx_type = tx.transaction_type.context("transaction type unavailable")?; let tx_gas_price = tx.gas_price.context("gas price is not available")?; let tx_max_priority_fee_per_gas = tx.max_priority_fee_per_gas.unwrap_or_default(); let gas_used = receipt.gas_used.context("gas used is not available")?; - let gas_price = if tx_type.as_u64() == 2 { - base_fee + tx_max_priority_fee_per_gas - } else { - tx_gas_price - }; + let gas_price = + if tx_type.as_u64() == 2 { base_fee + tx_max_priority_fee_per_gas } else { tx_gas_price }; let fee_amount = gas_used * gas_price; let fee_burned = gas_used * base_fee; let miner_earned_reward = fee_amount - fee_burned; @@ -94,10 +99,7 @@ fn get_fee_operations( let mut operations = vec![]; let first_op = Operation { - operation_identifier: OperationIdentifier { - index: 0, - network_index: None, - }, + operation_identifier: OperationIdentifier { index: 0, network_index: None }, related_operations: None, r#type: FEE_OP_TYPE.into(), status: Some(SUCCESS_STATUS.into()), @@ -116,14 +118,8 @@ fn get_fee_operations( }; let second_op = Operation { - operation_identifier: OperationIdentifier { - index: 1, - network_index: None, - }, - related_operations: Some(vec![OperationIdentifier { - index: 0, - network_index: None, - }]), + operation_identifier: OperationIdentifier { index: 1, network_index: None }, + related_operations: Some(vec![OperationIdentifier { index: 0, network_index: None }]), r#type: FEE_OP_TYPE.into(), status: Some(SUCCESS_STATUS.into()), account: Some(AccountIdentifier { @@ -145,10 +141,7 @@ fn get_fee_operations( if fee_burned != U256::from(0) { let burned_operation = Operation { - 
operation_identifier: OperationIdentifier { - index: 2, - network_index: None, - }, + operation_identifier: OperationIdentifier { index: 2, network_index: None }, related_operations: None, r#type: FEE_OP_TYPE.into(), status: Some(SUCCESS_STATUS.into()), @@ -184,6 +177,7 @@ async fn get_transaction_trace( Ok(client.request("debug_traceTransaction", params).await?) } +#[allow(clippy::too_many_lines)] fn get_trace_operations(trace: Trace, op_len: i64, currency: &Currency) -> Result> { let mut traces = VecDeque::new(); traces.push_back(trace); @@ -210,22 +204,11 @@ fn get_trace_operations(trace: Trace, op_len: i64, currency: &Currency) -> Resul } for trace in traces { - let mut metadata: HashMap = HashMap::new(); - let mut operation_status = SUCCESS_STATUS; - if trace.revert { - operation_status = FAILURE_STATUS; - metadata.insert("error".into(), trace.error_message); - } + let operation_status = if trace.revert { FAILURE_STATUS } else { SUCCESS_STATUS }; - let mut zero_value = false; - if trace.value == U256::from(0) { - zero_value = true; - } + let zero_value = trace.value.is_zero(); - let mut should_add = true; - if zero_value && trace.trace_type == CALL_OP_TYPE { - should_add = false; - } + let should_add = !(zero_value && trace.trace_type == CALL_OP_TYPE); let from = to_checksum(&trace.from, None); let to = to_checksum(&trace.to, None); @@ -233,7 +216,8 @@ fn get_trace_operations(trace: Trace, op_len: i64, currency: &Currency) -> Resul if should_add { let mut from_operation = Operation { operation_identifier: OperationIdentifier { - index: op_len + operations.len() as i64, + index: op_len + + i64::try_from(operations.len()).context("operation.index overflow")?, network_index: None, }, related_operations: None, @@ -367,7 +351,7 @@ pub async fn block_reward_transaction( let block_number = block.number.context("missing block number")?.as_u64(); let block_hash = block.hash.context("missing block hash")?; let block_id = BlockId::Hash(block_hash); - let miner = block.author.unwrap(); + let miner = block.author.context("missing block author")?; let mut uncles = vec![]; for (i, _) in block.uncles.iter().enumerate() { @@ -379,23 +363,20 @@ pub async fn block_reward_transaction( } let chain_config = TESTNET_CHAIN_CONFIG; - let mut mining_reward = FRONTIER_BLOCK_REWARD; - if chain_config.byzantium_block <= block_number { - mining_reward = BYZANTIUM_BLOCK_REWARD; - } - if chain_config.constantinople_block <= block_number { - mining_reward = CONSTANTINOPLE_BLOCK_REWARD; - } + let mut mining_reward = if chain_config.constantinople_block <= block_number { + CONSTANTINOPLE_BLOCK_REWARD + } else if chain_config.byzantium_block <= block_number { + BYZANTIUM_BLOCK_REWARD + } else { + FRONTIER_BLOCK_REWARD + }; if !uncles.is_empty() { mining_reward += (mining_reward / UNCLE_REWARD_MULTIPLIER) * mining_reward; } let mut operations = vec![]; let mining_reward_operation = Operation { - operation_identifier: OperationIdentifier { - index: 0, - network_index: None, - }, + operation_identifier: OperationIdentifier { index: 0, network_index: None }, related_operations: None, r#type: MINING_REWARD_OP_TYPE.into(), status: Some(SUCCESS_STATUS.into()), @@ -422,7 +403,7 @@ pub async fn block_reward_transaction( let operation = Operation { operation_identifier: OperationIdentifier { - index: operations.len() as i64, + index: i64::try_from(operations.len()).context("operation.index overflow")?, network_index: None, }, related_operations: None, @@ -445,9 +426,7 @@ pub async fn block_reward_transaction( } 
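// Illustrative sketch (not part of the diff): the `i64::try_from(...).context(...)`
// pattern used for `operation_identifier.index` above replaces the old lossy
// `operations.len() as i64` cast with a checked conversion. A minimal standalone
// example, assuming only the `anyhow` crate; `op_index` is a hypothetical helper:
use anyhow::{Context, Result};

fn op_index(len: u64) -> Result<i64> {
    // `len as i64` would silently wrap for values above `i64::MAX`;
    // `try_from` surfaces the truncation as an error instead.
    i64::try_from(len).context("operation.index overflow")
}

fn main() -> Result<()> {
    assert_eq!(op_index(3)?, 3);
    assert!(op_index(u64::MAX).is_err()); // would have wrapped with `as`
    Ok(())
}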
Ok(rosetta_types::Transaction { - transaction_identifier: TransactionIdentifier { - hash: hex::encode(block_hash), - }, + transaction_identifier: TransactionIdentifier { hash: hex::encode(block_hash) }, related_transactions: None, operations, metadata: None, diff --git a/chains/ethereum/tx/src/lib.rs b/chains/ethereum/tx/src/lib.rs index 3dbbce45..489a395f 100644 --- a/chains/ethereum/tx/src/lib.rs +++ b/chains/ethereum/tx/src/lib.rs @@ -1,11 +1,16 @@ use anyhow::Result; use ethabi::token::{LenientTokenizer, Tokenizer}; -use ethers_core::abi::HumanReadableParser; -use ethers_core::types::{Eip1559TransactionRequest, NameOrAddress, Signature, H160}; +use ethers_core::{ + abi::HumanReadableParser, + types::{ + transaction::eip2930::AccessList, Eip1559TransactionRequest, NameOrAddress, Signature, H160, + }, +}; use rosetta_config_ethereum::{EthereumMetadata, EthereumMetadataParams}; -use rosetta_core::crypto::address::Address; -use rosetta_core::crypto::SecretKey; -use rosetta_core::{BlockchainConfig, TransactionBuilder}; +use rosetta_core::{ + crypto::{address::Address, SecretKey}, + BlockchainConfig, TransactionBuilder, +}; use sha3::{Digest, Keccak256}; pub use ethers_core::types::U256; @@ -64,6 +69,7 @@ impl TransactionBuilder for EthereumTransactionBuilder { metadata: &Self::Metadata, secret_key: &SecretKey, ) -> Vec { + #[allow(clippy::unwrap_used)] let from = secret_key .public_key() .to_address(config.address_format) @@ -82,7 +88,7 @@ impl TransactionBuilder for EthereumTransactionBuilder { value: Some(U256(metadata_params.amount)), data: Some(metadata_params.data.clone().into()), nonce: Some(metadata.nonce.into()), - access_list: Default::default(), + access_list: AccessList::default(), max_priority_fee_per_gas: Some(U256(metadata.max_priority_fee_per_gas)), max_fee_per_gas: Some(U256(metadata.max_fee_per_gas)), chain_id: Some(metadata.chain_id.into()), @@ -91,11 +97,12 @@ impl TransactionBuilder for EthereumTransactionBuilder { hasher.update([0x02]); hasher.update(tx.rlp()); let hash = hasher.finalize(); + #[allow(clippy::unwrap_used)] let signature = secret_key.sign_prehashed(&hash).unwrap().to_bytes(); let rlp = tx.rlp_signed(&Signature { r: U256::from_big_endian(&signature[..32]), s: U256::from_big_endian(&signature[32..64]), - v: signature[64] as _, + v: u64::from(signature[64]), }); let mut tx = Vec::with_capacity(rlp.len() + 1); tx.push(0x02); diff --git a/chains/polkadot/config/src/lib.rs b/chains/polkadot/config/src/lib.rs index 99525835..6997d2f3 100644 --- a/chains/polkadot/config/src/lib.rs +++ b/chains/polkadot/config/src/lib.rs @@ -1,7 +1,11 @@ use anyhow::Result; -use rosetta_core::crypto::address::{AddressFormat, Ss58AddressFormatRegistry}; -use rosetta_core::crypto::Algorithm; -use rosetta_core::{BlockchainConfig, NodeUri}; +use rosetta_core::{ + crypto::{ + address::{AddressFormat, Ss58AddressFormatRegistry}, + Algorithm, + }, + BlockchainConfig, NodeUri, +}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use subxt::ext::sp_core::crypto::Ss58AddressFormat; @@ -12,7 +16,7 @@ pub mod metadata { pub mod dev {} } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct PolkadotNetworkProperties { blockchain: &'static str, network: &'static str, @@ -31,11 +35,7 @@ impl TryFrom<&str> for PolkadotNetworkProperties { // All blockchains in polkadot have "dev", "local" and "staging" variants // "dev" and "polkadot-dev" are the same - let chain = if value == "dev" { - "polkadot-dev" - } else { - value - }; + let chain = if value == "dev" 
{ "polkadot-dev" } else { value }; // Split blockchain and network let (blockchain, network) = chain.split_once('-').unwrap_or((chain, "")); @@ -98,15 +98,21 @@ impl TryFrom<&str> for PolkadotNetworkProperties { impl PolkadotNetworkProperties { // TODO: What is considered testnet? only local chains, or public testnets as well? + #[must_use] pub fn is_testnet(&self) -> bool { self.network != "mainnet" } + #[must_use] pub fn is_live(&self) -> bool { matches!(self.network, "mainnet" | "staging") } } +/// Retrieve the [`BlockchainConfig`] from the provided `network` +/// +/// # Errors +/// Returns `Err` if the network is not supported pub fn config(network: &str) -> Result { let properties = PolkadotNetworkProperties::try_from(network)?; @@ -125,9 +131,10 @@ pub fn config(network: &str) -> Result { node_uri: NodeUri::parse("ws://127.0.0.1:9944")?, node_image: "parity/polkadot:v1.0.0", node_command: Arc::new(move |network, port| { - let chain = match network { - "mainnet" => blockchain.to_string(), - _ => format!("{blockchain}-{network}"), + let chain = if network == "mainnet" { + blockchain.to_string() + } else { + format!("{blockchain}-{network}") }; match network { "dev" | "local" => vec![ diff --git a/chains/polkadot/server/Cargo.toml b/chains/polkadot/server/Cargo.toml index 95473953..da9376ae 100644 --- a/chains/polkadot/server/Cargo.toml +++ b/chains/polkadot/server/Cargo.toml @@ -21,6 +21,7 @@ serde_json = "1.0" sp-keyring = "24.0" subxt = { version = "0.31", default-features = false, features = ["substrate-compat", "native"] } tokio = { version = "1.32", features = ["rt-multi-thread", "macros"] } +tracing = "0.1" [dev-dependencies] rosetta-docker = { version = "0.4.0", path = "../../../rosetta-docker", features = ["tests"] } diff --git a/chains/polkadot/server/src/block.rs b/chains/polkadot/server/src/block.rs index 4860af64..fc45b6f2 100644 --- a/chains/polkadot/server/src/block.rs +++ b/chains/polkadot/server/src/block.rs @@ -9,7 +9,7 @@ use rosetta_core::{ }; use serde_json::{json, Value}; use subxt::{ - blocks::ExtrinsicDetails, + blocks::{ExtrinsicDetails, ExtrinsicEvents}, config::Hasher, events::EventDetails, ext::scale_value::{scale::TypeId, Composite, Primitive, ValueDef}, @@ -17,39 +17,43 @@ use subxt::{ Config, OnlineClient, }; -pub async fn get_transaction>( - config: &BlockchainConfig, +pub fn get_transaction_identifier>( extrinsic: &ExtrinsicDetails>, +) -> TransactionIdentifier { + TransactionIdentifier { hash: hex::encode(T::Hasher::hash_of(&extrinsic.bytes())) } +} + +pub fn get_transaction + Send>( + config: &BlockchainConfig, + transaction_identifier: TransactionIdentifier, + events: &ExtrinsicEvents, ) -> Result { - let events = extrinsic.events().await?; + // let transaction_identifier = TransactionIdentifier { + // hash: hex::encode(T::Hasher::hash_of(&extrinsic.bytes())), + // }; + // let events = extrinsic.events().await?; let mut operations = vec![]; for (event_index, event_data) in events.iter().enumerate() { let event = event_data?; let event_parsed_data = get_operation_data(config, &event)?; let mut fields = vec![]; - for field in event.event_metadata().variant.fields.iter() { + for field in &event.event_metadata().variant.fields { fields.push(json!({"name": field.name, "type": field.type_name})); } let op_metadata = Value::Array(fields); - let op_from: Option = - event_parsed_data.from.map(|address| AccountIdentifier { - address, - sub_account: None, - metadata: None, - }); + let op_from: Option = event_parsed_data + .from + .map(|address| AccountIdentifier 
{ address, sub_account: None, metadata: None }); - let op_neg_amount: Option = - event_parsed_data.amount.as_ref().map(|amount| Amount { - value: format!("-{amount}"), - currency: config.currency(), - metadata: None, - }); + let op_neg_amount: Option = event_parsed_data.amount.as_ref().map(|amount| { + Amount { value: format!("-{amount}"), currency: config.currency(), metadata: None } + }); let operation = Operation { operation_identifier: OperationIdentifier { - index: event_index as i64, + index: i64::try_from(event_index).context("event_index overflow")?, network_index: None, }, related_operations: None, @@ -65,31 +69,21 @@ pub async fn get_transaction>( if let (Some(to), Some(amount)) = (event_parsed_data.to, event_parsed_data.amount) { operations.push(Operation { operation_identifier: OperationIdentifier { - index: event_index as i64, + index: i64::try_from(event_index).context("event_index overflow")?, network_index: None, }, related_operations: None, r#type: event_parsed_data.event_type, status: None, - account: Some(AccountIdentifier { - address: to, - sub_account: None, - metadata: None, - }), - amount: Some(Amount { - value: amount, - currency: config.currency(), - metadata: None, - }), + account: Some(AccountIdentifier { address: to, sub_account: None, metadata: None }), + amount: Some(Amount { value: amount, currency: config.currency(), metadata: None }), coin_change: None, metadata: Some(op_metadata), }); } } Ok(Transaction { - transaction_identifier: TransactionIdentifier { - hash: hex::encode(T::Hasher::hash_of(&extrinsic.bytes())), - }, + transaction_identifier, operations, related_transactions: None, metadata: None, @@ -107,58 +101,43 @@ fn get_operation_data>( let event_fields = event.field_values()?; let parsed_data = match event_fields { - subxt::ext::scale_value::Composite::Named(value) => { - let from_data = value - .iter() - .filter(|(k, _)| k == "from" || k == "who" || k == "account") - .collect::>(); - - let sender_address: Option = if !from_data.is_empty() { - let data = from_data.into_iter().next().context("invalid operation")?; + Composite::Named(value) => { + let mut from_data = + value.iter().filter(|(k, _)| k == "from" || k == "who" || k == "account"); + let sender_address: Option = if let Some(data) = from_data.next() { let address = generate_address(config, &data.1.value)?; Some(address) } else { None }; - let amount_data = value - .iter() - .filter(|(k, _)| k == "amount" || k == "actual_fee") - .collect::>(); - - let amount: Option = if !amount_data.is_empty() { - let value = amount_data - .into_iter() - .next() - .context("invalid operation")?; - + let amount: Option = if let Some(value) = + value.iter().find(|(k, _)| k == "amount" || k == "actual_fee") + { match &value.1.value { ValueDef::Primitive(Primitive::U128(amount)) => Some(amount.to_string()), _ => { anyhow::bail!("invalid operation"); - } + }, } } else { None }; - let to_data = value.iter().filter(|(k, _)| k == "to").collect::>(); - - let to_address: Option = if !to_data.is_empty() { - let data = to_data.into_iter().next().context("invalid operation")?; - - let address = generate_address(config, &data.1.value)?; - Some(address) - } else { - None - }; + let to_address: Option = + if let Some(data) = value.iter().find(|(k, _)| k == "to") { + let address = generate_address(config, &data.1.value)?; + Some(address) + } else { + None + }; (sender_address, amount, to_address) - } - _ => { + }, + Composite::Unnamed(_) => { anyhow::bail!("invalid operation"); - } + }, }; Ok(TransactionOperationStatus { 
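// Illustrative sketch (not part of the diff): `get_operation_data` above swaps the
// old "filter + collect + is_empty" dance for a single `Iterator::find`, which stops
// at the first matching field and skips the intermediate `Vec`. A self-contained
// analogue over a hypothetical `(name, value)` field list:
fn first_amount(fields: &[(String, u128)]) -> Option<u128> {
    // before: fields.iter().filter(|(k, _)| k == "amount").collect::<Vec<_>>(),
    //         followed by an emptiness check and `into_iter().next()`
    fields.iter().find(|(k, _)| k == "amount" || k == "actual_fee").map(|(_, v)| *v)
}

fn main() {
    let fields = vec![("who".to_string(), 1u128), ("actual_fee".to_string(), 42)];
    assert_eq!(first_amount(&fields), Some(42));
}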
@@ -186,16 +165,20 @@ fn generate_address(config: &BlockchainConfig, val: &ValueDef) -> Result for data in data.values() { match data.value { ValueDef::Primitive(Primitive::U128(val)) => { - addr_array.push(val as u8); - } + let Ok(val) = u8::try_from(val) else { + tracing::error!("overflow: {val} > 255"); + anyhow::bail!("overflow: {val} > 255"); + }; + addr_array.push(val); + }, _ => anyhow::bail!("invalid operation"), } } - } + }, _ => anyhow::bail!("invalid operation"), } } - } + }, _ => anyhow::bail!("invalid operation"), } diff --git a/chains/polkadot/server/src/call.rs b/chains/polkadot/server/src/call.rs index 249fb9b3..d7eef74e 100644 --- a/chains/polkadot/server/src/call.rs +++ b/chains/polkadot/server/src/call.rs @@ -1,11 +1,10 @@ use anyhow::{Context, Result}; -use scale_info::PortableRegistry; use scale_info::{ - form::PortableForm, TypeDef, TypeDefArray, TypeDefBitSequence, TypeDefCompact, - TypeDefComposite, TypeDefPrimitive, TypeDefSequence, TypeDefTuple, TypeDefVariant, + form::PortableForm, PortableRegistry, TypeDef, TypeDefArray, TypeDefBitSequence, + TypeDefCompact, TypeDefComposite, TypeDefPrimitive, TypeDefSequence, TypeDefTuple, + TypeDefVariant, }; -use serde_json::Value; -use serde_json::{Map, Value as SerdeValue}; +use serde_json::{Map, Value, Value as SerdeValue}; use subxt::{ dynamic::Value as SubxtValue, ext::scale_value::{self, scale::TypeId, BitSequence, ValueDef}, @@ -44,17 +43,16 @@ pub async fn dynamic_storage_req( let storage_type = storage_metadata.entry_type().clone(); let type_id = match storage_type { StorageEntryType::Map { key_ty, .. } => Some(key_ty), - _ => None, + StorageEntryType::Plain(_) => None, }; let params = if let Some(id) = type_id { let ty = types.resolve(id).context("invalid metadata")?; - match ty.type_def { - TypeDef::Tuple(_) => type_distributor(params, &ty.type_def, types)?, - _ => { - let json_params = params.as_array().context("expected array")?; - let params = json_params.iter().next().context("invalid params")?.clone(); - type_distributor(params, &ty.type_def, types)? - } + if let TypeDef::Tuple(_) = ty.type_def { + type_distributor(params, &ty.type_def, types)? + } else { + let json_params = params.as_array().context("expected array")?; + let params = json_params.iter().next().context("invalid params")?.clone(); + type_distributor(params, &ty.type_def, types)? } } else { vec![] @@ -64,12 +62,7 @@ pub async fn dynamic_storage_req( let storage_address = subxt::dynamic::storage(pallet_name, storage_name, params); - let data = subxt - .storage() - .at_latest() - .await? 
- .fetch_or_default(&storage_address) - .await?; + let data = subxt.storage().at_latest().await?.fetch_or_default(&storage_address).await?; let serde_val = if data.encoded() == [0] { Value::Null @@ -108,7 +101,7 @@ fn type_distributor( ) -> Result> { let mut value_vec = vec![]; let val = match type_from_pallet { - TypeDef::Variant(inner_val) => make_variant(json_value, inner_val, types), + TypeDef::Variant(inner_val) => make_variant(&json_value, inner_val, types), TypeDef::Composite(inner_val) => make_composite(json_value, inner_val, types), TypeDef::Array(inner_val) => make_array(json_value, inner_val), TypeDef::Tuple(inner_val) => make_tuple(json_value, inner_val, types), @@ -122,7 +115,7 @@ fn type_distributor( } fn make_variant( - json_value: Value, + json_value: &Value, type_from_pallet: &TypeDefVariant, types: &PortableRegistry, ) -> Result { @@ -150,10 +143,8 @@ fn make_variant( if let Ok(obtained_types) = obtained_result { if let Some(obtained_type) = obtained_types.into_iter().next() { if is_named { - vec_of_named_data.push(( - field.name.context("invalid metadata")?.to_string(), - obtained_type, - )); + vec_of_named_data + .push((field.name.context("invalid metadata")?.to_string(), obtained_type)); } else { vec_of_unnamed_data.push(obtained_type); } @@ -195,7 +186,7 @@ fn make_composite( } } } - } + }, std::cmp::Ordering::Greater => { let json_value = json_value.as_array().context("invalid params")?; for (value_received, field) in json_value.iter().zip(fields) { @@ -215,10 +206,10 @@ fn make_composite( } } } - } + }, std::cmp::Ordering::Less => { //keep the vector empty - } + }, } if is_named { @@ -290,7 +281,7 @@ fn make_primitive(json_value: Value, _type_from_pallet: &TypeDefPrimitive) -> Re let number_string = val.to_string(); let number_i128 = number_string.parse::()?; Ok(SubxtValue::u128(number_i128)) - } + }, Value::String(val) => Ok(SubxtValue::string(val)), _ => anyhow::bail!("expected bool number or string"), } @@ -305,7 +296,7 @@ fn make_compact( let number_string = val.to_string(); let number_i128 = number_string.parse::()?; Ok(SubxtValue::u128(number_i128)) - } + }, _ => anyhow::bail!("expected number"), } } @@ -322,7 +313,7 @@ fn make_bit_sequence( Value::Number(val) => { let number = val.as_u64().context("invalid params")?; bits_array.push(number != 0); - } + }, _ => anyhow::bail!("expected bit sequence"), } } @@ -339,14 +330,14 @@ fn scale_to_serde_json(data: ValueDef) -> Result { map.insert(key, scale_to_serde_json(value.value)?); } Ok(SerdeValue::Object(map)) - } + }, scale_value::Composite::Unnamed(val) => { let mut vec_of_array = vec![]; for value in val { vec_of_array.push(scale_to_serde_json(value.value)?); } Ok(SerdeValue::Array(vec_of_array)) - } + }, }, scale_value::ValueDef::Variant(val) => { if val.values.is_empty() { @@ -356,14 +347,14 @@ fn scale_to_serde_json(data: ValueDef) -> Result { map.insert(val.name, scale_to_serde_json(val.values.into())?); Ok(SerdeValue::Object(map)) } - } + }, scale_value::ValueDef::BitSequence(val) => { let mut vec_of_array = vec![]; for i in val { vec_of_array.push(SerdeValue::Bool(i)); } Ok(SerdeValue::Array(vec_of_array)) - } + }, scale_value::ValueDef::Primitive(val) => match val { scale_value::Primitive::Bool(val) => Ok(SerdeValue::Bool(val)), scale_value::Primitive::Char(val) => Ok(SerdeValue::String(val.to_string())), diff --git a/chains/polkadot/server/src/lib.rs b/chains/polkadot/server/src/lib.rs index e5a3430d..918aa3f2 100644 --- a/chains/polkadot/server/src/lib.rs +++ b/chains/polkadot/server/src/lib.rs @@ 
-14,8 +14,8 @@ use rosetta_server::ws::default_client; use serde_json::Value; use sp_keyring::AccountKeyring; use std::time::Duration; -use subxt::config::{Hasher, Header}; use subxt::{ +    config::{Hasher, Header}, dynamic::Value as SubtxValue, rpc::types::BlockNumber, tx::{PairSigner, SubmittableExtrinsic}, @@ -33,26 +33,27 @@ pub struct PolkadotClient { } impl PolkadotClient { +    /// Creates a new polkadot client, loading the config from `network` and connecting to `addr` +    /// +    /// # Errors +    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable. pub async fn new(network: &str, addr: &str) -> Result { let config = rosetta_config_polkadot::config(network)?; Self::from_config(config, addr).await } +    /// Creates a new polkadot client using the provided `config` and connecting to `addr` +    /// +    /// # Errors +    /// Will return `Err` when the network is invalid, or when the provided `addr` is unreachable. pub async fn from_config(config: BlockchainConfig, addr: &str) -> Result { let client = { let ws_client = default_client(addr, None).await?; OnlineClient::::from_rpc_client(std::sync::Arc::new(ws_client)).await? }; let genesis = client.genesis_hash(); - let genesis_block = BlockIdentifier { - index: 0, - hash: hex::encode(genesis.as_ref()), - }; - Ok(Self { - config, - client, - genesis_block, - }) + let genesis_block = BlockIdentifier { index: 0, hash: hex::encode(genesis.as_ref()) }; + Ok(Self { config, client, genesis_block }) } async fn account_info( @@ -79,30 +80,23 @@ impl PolkadotClient { .ok_or_else(|| anyhow::anyhow!("no block hash found"))? }; - let account_info = self - .client - .storage() - .at(block_hash) - .fetch(&storage_query) - .await?; - - if let Some(account_info) = account_info { - AccountInfo::decode(&mut account_info.encoded()) - .map_err(|_| anyhow::anyhow!("invalid format")) - } else { - Ok(AccountInfo { - nonce: 0, - consumers: 0, - providers: 0, - sufficients: 0, - data: AccountData { - free: 0, - reserved: 0, - misc_frozen: 0, - fee_frozen: 0, - }, - }) - } + let account_info = self.client.storage().at(block_hash).fetch(&storage_query).await?; + + account_info.map_or_else( + || { + Ok(AccountInfo { + nonce: 0, + consumers: 0, + providers: 0, + sufficients: 0, + data: AccountData { free: 0, reserved: 0, misc_frozen: 0, fee_frozen: 0 }, + }) + }, + |account_info| { + AccountInfo::decode(&mut account_info.encoded()) + .map_err(|_| anyhow::anyhow!("invalid format")) + }, + ) } } @@ -125,18 +119,10 @@ impl BlockchainClient for PolkadotClient { } async fn current_block(&self) -> Result { - let block = self - .client - .rpc() - .block(None) - .await? - .context("no current block")?; - let index = block.block.header.number as _; + let block = self.client.rpc().block(None).await?.context("no current block")?; + let index = u64::from(block.block.header.number); let hash = block.block.header.hash(); - Ok(BlockIdentifier { - index, - hash: hex::encode(hash.as_ref()), - }) + Ok(BlockIdentifier { index, hash: hex::encode(hash.as_ref()) }) } async fn finalized_block(&self) -> Result { @@ -147,12 +133,9 @@ impl BlockchainClient for PolkadotClient { .block(Some(finalized_head)) .await?
.context("no finalized block")?; - let index = block.block.header.number as _; + let index = u64::from(block.block.header.number); let hash = block.block.header.hash(); - Ok(BlockIdentifier { - index, - hash: hex::encode(hash.as_ref()), - }) + Ok(BlockIdentifier { index, hash: hex::encode(hash.as_ref()) }) } async fn balance(&self, address: &Address, block: &BlockIdentifier) -> Result { @@ -172,9 +155,7 @@ impl BlockchainClient for PolkadotClient { .context("invalid address")?; let signer = PairSigner::::new(AccountKeyring::Alice.pair()); - let tx = polkadot_metadata::tx() - .balances() - .transfer(address.into(), value); + let tx = polkadot_metadata::tx().balances().transfer(address.into(), value); let hash = self .client @@ -245,26 +226,27 @@ impl BlockchainClient for PolkadotClient { // Build timestamp query let timestamp_now_query = polkadot_metadata::storage().timestamp().now(); - let timestamp = block - .storage() - .fetch_or_default(×tamp_now_query) - .await?; + let timestamp = block.storage().fetch_or_default(×tamp_now_query).await?; let body = block.body().await?; let mut transactions = vec![]; for extrinsic in body.extrinsics().iter().filter_map(Result::ok) { - let transaction = crate::block::get_transaction(self.config(), &extrinsic).await?; + let transaction_identifier = crate::block::get_transaction_identifier(&extrinsic); + let events = extrinsic.events().await?; + let transaction = + crate::block::get_transaction(self.config(), transaction_identifier, &events)?; transactions.push(transaction); } Ok(Block { block_identifier: BlockIdentifier { - index: block.number() as _, + index: u64::from(block.number()), hash: hex::encode(block.hash()), }, parent_block_identifier: BlockIdentifier { - index: block.number().saturating_sub(1) as _, + index: u64::from(block.number().saturating_sub(1)), hash: hex::encode(block.header().parent_hash), }, - timestamp: Duration::from_millis(timestamp).as_nanos() as i64, + timestamp: i64::try_from(Duration::from_millis(timestamp).as_nanos()) + .context("timestamp overflow")?, transactions, metadata: None, }) @@ -286,7 +268,10 @@ impl BlockchainClient for PolkadotClient { ::Hasher::hash_of(&extrinsic.bytes()) == transaction_hash }) .context("transaction not found")?; - crate::block::get_transaction(self.config(), &extrinsic).await + + let identifier = crate::block::get_transaction_identifier(&extrinsic); + let events = extrinsic.events().await?; + crate::block::get_transaction(self.config(), identifier, &events) } async fn call(&self, request: &CallRequest) -> Result { @@ -307,10 +292,10 @@ impl BlockchainClient for PolkadotClient { request.parameters.clone(), ) .await - } + }, _ => { anyhow::bail!("invalid query type"); - } + }, } } } diff --git a/chains/polkadot/tx/src/lib.rs b/chains/polkadot/tx/src/lib.rs index db382fb4..90685cde 100644 --- a/chains/polkadot/tx/src/lib.rs +++ b/chains/polkadot/tx/src/lib.rs @@ -1,9 +1,10 @@ use anyhow::{bail, Context, Result}; use parity_scale_codec::{Compact, Decode, Encode}; use rosetta_config_polkadot::{PolkadotMetadata, PolkadotMetadataParams}; -use rosetta_core::crypto::address::Address; -use rosetta_core::crypto::SecretKey; -use rosetta_core::{BlockchainConfig, TransactionBuilder}; +use rosetta_core::{ + crypto::{address::Address, SecretKey}, + BlockchainConfig, TransactionBuilder, +}; #[derive(Debug, Decode, Encode)] struct AccountId32([u8; 32]); @@ -33,8 +34,8 @@ fn parse_address(address: &Address) -> Result { if data.len() < 2 { anyhow::bail!("ss58: bad length"); } - let (prefix_len, _ident) = match 
data[0] { - 0..=63 => (1, data[0] as u16), + let (prefix_len, _ident) = match data.first().context("ss58: invalid prefix")? { + 0..=63 => (1, u16::from(data[0])), 64..=127 => { // weird bit manipulation owing to the combination of LE encoding and missing two // bits from the left. @@ -42,9 +43,9 @@ fn parse_address(address: &Address) -> Result { // they make the LE-encoded 16-bit value: aaaaaabb 00cccccc // so the lower byte is formed of aaaaaabb and the higher byte is 00cccccc let lower = (data[0] << 2) | (data[1] >> 6); - let upper = data[1] & 0b00111111; - (2, (lower as u16) | ((upper as u16) << 8)) - } + let upper = data[1] & 0b0011_1111; + (2, u16::from(lower) | (u16::from(upper) << 8)) + }, _ => anyhow::bail!("ss58: invalid prefix"), }; if data.len() != prefix_len + body_len + CHECKSUM_LEN { @@ -62,9 +63,7 @@ fn parse_address(address: &Address) -> Result { anyhow::bail!("invalid checksum"); } - let result = data[prefix_len..body_len + prefix_len] - .try_into() - .context("ss58: bad length")?; + let result = data[prefix_len..body_len + prefix_len].try_into().context("ss58: bad length")?; Ok(AccountId32(result)) } @@ -78,6 +77,13 @@ fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { #[derive(Default)] pub struct PolkadotTransactionBuilder; +#[derive(Debug, Decode, Encode)] +struct Transfer { + pub dest: MultiAddress, + #[codec(compact)] + pub amount: u128, +} + impl TransactionBuilder for PolkadotTransactionBuilder { type MetadataParams = PolkadotMetadataParams; type Metadata = PolkadotMetadata; @@ -85,12 +91,6 @@ impl TransactionBuilder for PolkadotTransactionBuilder { fn transfer(&self, address: &Address, amount: u128) -> Result { let address: AccountId32 = parse_address(address)?; let dest = MultiAddress::Id(address); - #[derive(Debug, Decode, Encode)] - struct Transfer { - pub dest: MultiAddress, - #[codec(compact)] - pub amount: u128, - } Ok(PolkadotMetadataParams { pallet_name: "Balances".into(), call_name: "transfer".into(), @@ -115,11 +115,12 @@ impl TransactionBuilder for PolkadotTransactionBuilder { metadata: &Self::Metadata, secret_key: &SecretKey, ) -> Vec { + #[allow(clippy::unwrap_used)] let address = AccountId32(secret_key.public_key().to_bytes().try_into().unwrap()); let address = MultiAddress::Id(address); let extra_parameters = ( Era::Immortal, - Compact(metadata.nonce as u64), + Compact(u64::from(metadata.nonce)), // plain tip Compact(0u128), ); @@ -145,13 +146,14 @@ impl TransactionBuilder for PolkadotTransactionBuilder { } else { secret_key.sign(&payload, "substrate") }; + #[allow(clippy::unwrap_used)] let signature = MultiSignature::Sr25519(signature.to_bytes().as_slice().try_into().unwrap()); // encode transaction let mut encoded = vec![]; // "is signed" + transaction protocol version (4) - (0b10000000 + 4u8).encode_to(&mut encoded); + (0b1000_0000 + 4u8).encode_to(&mut encoded); // from address for signature address.encode_to(&mut encoded); // signature encode pending to vector @@ -164,7 +166,8 @@ impl TransactionBuilder for PolkadotTransactionBuilder { encoded.extend(&metadata_params.call_args); // now, prefix byte length: - let len = Compact(encoded.len() as u32); + #[allow(clippy::expect_used)] + let len = Compact(u32::try_from(encoded.len()).expect("tx cannot have more than 32 bits")); let mut transaction = vec![]; len.encode_to(&mut transaction); transaction.extend(encoded); diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000..0358cdb5 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,2 @@ +allow-unwrap-in-tests = true 
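// Illustrative sketch (not part of the diff): the two-byte SS58 prefix decode in
// `parse_address` above reassembles a 14-bit ident from wire bytes laid out roughly as
// `01aaaaaa bbcccccc`, producing `aaaaaabb` (low byte) and `00cccccc` (high byte).
// A standalone round trip for a hypothetical ident of 128 (`decode_prefix` is not a
// function in this crate):
fn decode_prefix(b0: u8, b1: u8) -> u16 {
    let lower = (b0 << 2) | (b1 >> 6); // the `01` marker bits shift out of the u8
    let upper = b1 & 0b0011_1111;
    u16::from(lower) | (u16::from(upper) << 8)
}

fn main() {
    // ident 128 = 0b000000_10000000 is encoded on the wire as [0b0110_0000, 0b0000_0000]
    assert_eq!(decode_prefix(0b0110_0000, 0b0000_0000), 128);
}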
+allow-expect-in-tests = true diff --git a/rosetta-client/src/client.rs b/rosetta-client/src/client.rs index 21813015..c8dfc3f1 100644 --- a/rosetta-client/src/client.rs +++ b/rosetta-client/src/client.rs @@ -1,16 +1,16 @@ #![allow(missing_docs)] -use crate::crypto::address::Address; -use crate::crypto::PublicKey; -use crate::types::{ - Block, BlockIdentifier, CallRequest, Coin, PartialBlockIdentifier, Transaction, - TransactionIdentifier, +use crate::{ + crypto::{address::Address, PublicKey}, + types::{ + Block, BlockIdentifier, CallRequest, Coin, PartialBlockIdentifier, Transaction, + TransactionIdentifier, + }, + Blockchain, BlockchainConfig, }; -use crate::{Blockchain, BlockchainConfig}; use anyhow::Result; use derive_more::From; use futures::Stream; -use rosetta_core::BlockchainClient; -use rosetta_core::ClientEvent; +use rosetta_core::{BlockchainClient, ClientEvent}; use rosetta_server_astar::{AstarClient, AstarMetadata, AstarMetadataParams}; use rosetta_server_bitcoin::{BitcoinClient, BitcoinMetadata, BitcoinMetadataParams}; use rosetta_server_ethereum::{ @@ -19,8 +19,7 @@ use rosetta_server_ethereum::{ use rosetta_server_polkadot::{PolkadotClient, PolkadotMetadata, PolkadotMetadataParams}; use serde::{Deserialize, Serialize}; use serde_json::Value; -use std::pin::Pin; -use std::str::FromStr; +use std::{pin::Pin, str::FromStr}; /// Generic Client pub enum GenericClient { @@ -30,29 +29,30 @@ pub enum GenericClient { Polkadot(PolkadotClient), } +#[allow(clippy::missing_errors_doc)] impl GenericClient { pub async fn new(blockchain: Blockchain, network: &str, url: &str) -> Result { Ok(match blockchain { Blockchain::Bitcoin => { let client = BitcoinClient::new(network, url).await?; Self::Bitcoin(client) - } + }, Blockchain::Ethereum => { let client = EthereumClient::new("ethereum", network, url).await?; Self::Ethereum(client) - } + }, Blockchain::Polygon => { let client = EthereumClient::new("polygon", network, url).await?; Self::Ethereum(client) - } + }, Blockchain::Astar => { let client = AstarClient::new(network, url).await?; Self::Astar(client) - } + }, Blockchain::Polkadot => { let client = PolkadotClient::new(network, url).await?; Self::Polkadot(client) - } + }, }) } @@ -62,19 +62,19 @@ impl GenericClient { Blockchain::Bitcoin => { let client = BitcoinClient::from_config(config, url).await?; Self::Bitcoin(client) - } + }, Blockchain::Ethereum | Blockchain::Polygon => { let client = EthereumClient::from_config(config, url).await?; Self::Ethereum(client) - } + }, Blockchain::Astar => { let client = AstarClient::from_config(config, url).await?; Self::Astar(client) - } + }, Blockchain::Polkadot => { let client = PolkadotClient::from_config(config, url).await?; Self::Polkadot(client) - } + }, }) } } @@ -154,16 +154,16 @@ impl BlockchainClient for GenericClient { Ok(match (self, params) { (Self::Bitcoin(client), GenericMetadataParams::Bitcoin(params)) => { client.metadata(public_key, params).await?.into() - } + }, (Self::Ethereum(client), GenericMetadataParams::Ethereum(params)) => { client.metadata(public_key, params).await?.into() - } + }, (Self::Astar(client), GenericMetadataParams::Astar(params)) => { client.metadata(public_key, params).await?.into() - } + }, (Self::Polkadot(client), GenericMetadataParams::Polkadot(params)) => { client.metadata(public_key, params).await?.into() - } + }, _ => anyhow::bail!("invalid params"), }) } @@ -190,8 +190,9 @@ impl BlockchainClient for GenericClient { /// Return a stream of events, return None if the blockchain doesn't support events. 
async fn listen<'a>(&'a self) -> Result>> { - Ok(dispatch!(self.listen().await?.map(|s| Pin::new( - Box::new(s) as Box + Send + Unpin> - )))) + Ok(dispatch!(self + .listen() + .await? + .map(|s| Pin::new(Box::new(s) as Box + Send + Unpin>)))) } } diff --git a/rosetta-client/src/mnemonic.rs b/rosetta-client/src/mnemonic.rs index c79480e4..5623b211 100644 --- a/rosetta-client/src/mnemonic.rs +++ b/rosetta-client/src/mnemonic.rs @@ -1,5 +1,5 @@ use crate::crypto::bip39::{Language, Mnemonic}; -use anyhow::Result; +use anyhow::{Context, Result}; #[cfg(not(target_family = "wasm"))] use std::fs::OpenOptions; #[cfg(not(target_family = "wasm"))] @@ -67,9 +67,10 @@ impl MnemonicStore { /// Sets the stored mnemonic. pub fn set(&self, mnemonic: &Mnemonic) -> Result<()> { - std::fs::create_dir_all(self.path.parent().unwrap())?; #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; + + std::fs::create_dir_all(self.path.parent().context("cannot create config dir")?)?; let mut opts = OpenOptions::new(); opts.create(true).write(true).truncate(true); #[cfg(unix)] diff --git a/rosetta-client/src/signer.rs b/rosetta-client/src/signer.rs index a600c3ca..f146cb60 100644 --- a/rosetta-client/src/signer.rs +++ b/rosetta-client/src/signer.rs @@ -1,11 +1,13 @@ -use crate::crypto::{ - address::Address, - bip32::{DerivedPublicKey, DerivedSecretKey}, - bip39::Mnemonic, - bip44::ChildNumber, - Algorithm, +use crate::{ + crypto::{ + address::Address, + bip32::{DerivedPublicKey, DerivedSecretKey}, + bip39::Mnemonic, + bip44::ChildNumber, + Algorithm, + }, + types::{AccountIdentifier, CurveType, PublicKey}, }; -use crate::types::{AccountIdentifier, CurveType, PublicKey}; use anyhow::Result; /// Signer derives keys from a mnemonic. @@ -19,6 +21,7 @@ pub struct Signer { impl Signer { /// Creates a new signer from a mnemonic and password. + #[allow(clippy::similar_names, clippy::missing_errors_doc)] pub fn new(mnemonic: &Mnemonic, password: &str) -> Result { let secp256k1 = DerivedSecretKey::new(mnemonic, password, Algorithm::EcdsaSecp256k1)?; let secp256k1_recoverable = @@ -26,41 +29,37 @@ impl Signer { let secp256r1 = DerivedSecretKey::new(mnemonic, password, Algorithm::EcdsaSecp256r1)?; let ed25519 = DerivedSecretKey::new(mnemonic, password, Algorithm::Ed25519)?; let sr25519 = DerivedSecretKey::new(mnemonic, password, Algorithm::Sr25519)?; - Ok(Self { - secp256k1, - secp256k1_recoverable, - secp256r1, - ed25519, - sr25519, - }) + Ok(Self { secp256k1, secp256k1_recoverable, secp256r1, ed25519, sr25519 }) } /// Creates a new ephemeral signer. - #[allow(unused)] + #[allow(unused, clippy::missing_errors_doc)] pub fn generate() -> Result { let mnemonic = crate::mnemonic::generate_mnemonic()?; Self::new(&mnemonic, "") } /// Derives a master key from a mnemonic. - pub fn master_key(&self, algorithm: Algorithm) -> Result<&DerivedSecretKey> { - Ok(match algorithm { + #[must_use] + pub const fn master_key(&self, algorithm: Algorithm) -> &DerivedSecretKey { + match algorithm { Algorithm::EcdsaSecp256k1 => &self.secp256k1, Algorithm::EcdsaRecoverableSecp256k1 => &self.secp256k1_recoverable, Algorithm::EcdsaSecp256r1 => &self.secp256r1, Algorithm::Ed25519 => &self.ed25519, Algorithm::Sr25519 => &self.sr25519, - }) + } } /// Derives a bip44 key from a mnemonic. + #[allow(clippy::missing_errors_doc)] pub fn bip44_account( &self, algorithm: Algorithm, coin: u32, account: u32, ) -> Result { - self.master_key(algorithm)? + self.master_key(algorithm) .derive(ChildNumber::hardened_from_u32(44))? .derive(ChildNumber::hardened_from_u32(coin))? 
.derive(ChildNumber::hardened_from_u32(account))? @@ -78,8 +77,9 @@ impl RosettaPublicKey for DerivedPublicKey { fn to_rosetta(&self) -> PublicKey { PublicKey { curve_type: match self.public_key().algorithm() { - Algorithm::EcdsaSecp256k1 => CurveType::Secp256k1, - Algorithm::EcdsaRecoverableSecp256k1 => CurveType::Secp256k1, + Algorithm::EcdsaSecp256k1 | Algorithm::EcdsaRecoverableSecp256k1 => { + CurveType::Secp256k1 + }, Algorithm::EcdsaSecp256r1 => CurveType::Secp256r1, Algorithm::Ed25519 => CurveType::Edwards25519, Algorithm::Sr25519 => CurveType::Schnorrkel, @@ -97,10 +97,6 @@ pub trait RosettaAccount { impl RosettaAccount for Address { fn to_rosetta(&self) -> AccountIdentifier { - AccountIdentifier { - address: self.address().into(), - sub_account: None, - metadata: None, - } + AccountIdentifier { address: self.address().into(), sub_account: None, metadata: None } } } diff --git a/rosetta-client/src/tx_builder.rs b/rosetta-client/src/tx_builder.rs index 83ddf5cc..646f33c2 100644 --- a/rosetta-client/src/tx_builder.rs +++ b/rosetta-client/src/tx_builder.rs @@ -1,7 +1,8 @@ -use crate::client::{GenericMetadata, GenericMetadataParams}; -use crate::crypto::address::Address; -use crate::crypto::SecretKey; -use crate::BlockchainConfig; +use crate::{ + client::{GenericMetadata, GenericMetadataParams}, + crypto::{address::Address, SecretKey}, + BlockchainConfig, +}; use anyhow::Result; use rosetta_core::TransactionBuilder; use rosetta_server_astar::AstarMetadataParams; @@ -15,9 +16,9 @@ pub enum GenericTransactionBuilder { impl GenericTransactionBuilder { pub fn new(config: &BlockchainConfig) -> Result { Ok(match config.blockchain { - "astar" => Self::Astar(Default::default()), - "ethereum" => Self::Ethereum(Default::default()), - "polkadot" => Self::Polkadot(Default::default()), + "astar" => Self::Astar(rosetta_tx_ethereum::EthereumTransactionBuilder), + "ethereum" => Self::Ethereum(rosetta_tx_ethereum::EthereumTransactionBuilder), + "polkadot" => Self::Polkadot(rosetta_tx_polkadot::PolkadotTransactionBuilder), _ => anyhow::bail!("unsupported blockchain"), }) } @@ -40,7 +41,7 @@ impl GenericTransactionBuilder { Ok(match self { Self::Astar(tx) => { AstarMetadataParams(tx.method_call(contract, method, params, amount)?).into() - } + }, Self::Ethereum(tx) => tx.method_call(contract, method, params, amount)?.into(), Self::Polkadot(tx) => tx.method_call(contract, method, params, amount)?.into(), }) diff --git a/rosetta-client/src/wallet.rs b/rosetta-client/src/wallet.rs index 831edaaf..7773e042 100644 --- a/rosetta-client/src/wallet.rs +++ b/rosetta-client/src/wallet.rs @@ -1,17 +1,17 @@ -use crate::client::{GenericClient, GenericMetadata, GenericMetadataParams}; -use crate::crypto::address::Address; -use crate::crypto::bip32::DerivedSecretKey; -use crate::crypto::bip44::ChildNumber; -use crate::mnemonic::MnemonicStore; -use crate::signer::{RosettaAccount, RosettaPublicKey, Signer}; -use crate::tx_builder::GenericTransactionBuilder; -use crate::types::{ - AccountIdentifier, Amount, BlockIdentifier, Coin, PublicKey, TransactionIdentifier, +use crate::{ + client::{GenericClient, GenericMetadata, GenericMetadataParams}, + crypto::{address::Address, bip32::DerivedSecretKey, bip44::ChildNumber}, + mnemonic::MnemonicStore, + signer::{RosettaAccount, RosettaPublicKey, Signer}, + tx_builder::GenericTransactionBuilder, + types::{AccountIdentifier, Amount, BlockIdentifier, Coin, PublicKey, TransactionIdentifier}, + Blockchain, BlockchainConfig, }; -use crate::{Blockchain, BlockchainConfig}; use 
anyhow::Result; -use rosetta_core::types::{Block, CallRequest, PartialBlockIdentifier, Transaction}; -use rosetta_core::{BlockchainClient, RosettaAlgorithm}; +use rosetta_core::{ + types::{Block, CallRequest, PartialBlockIdentifier, Transaction}, + BlockchainClient, RosettaAlgorithm, +}; use serde_json::json; use std::path::Path; @@ -27,6 +27,7 @@ pub struct Wallet { impl Wallet { /// Creates a new wallet from blockchain, network, url and keyfile. + #[allow(clippy::missing_errors_doc)] pub async fn new( blockchain: Blockchain, network: &str, @@ -34,21 +35,23 @@ impl Wallet { keyfile: Option<&Path>, ) -> Result { let client = GenericClient::new(blockchain, network, url).await?; - Self::from_client(client, keyfile).await + Self::from_client(client, keyfile) } /// Creates a new wallet from a config, url and keyfile. + #[allow(clippy::missing_errors_doc)] pub async fn from_config( config: BlockchainConfig, url: &str, keyfile: Option<&Path>, ) -> Result { let client = GenericClient::from_config(config, url).await?; - Self::from_client(client, keyfile).await + Self::from_client(client, keyfile) } /// Creates a new wallet from a client, url and keyfile. - pub async fn from_client(client: GenericClient, keyfile: Option<&Path>) -> Result { + #[allow(clippy::missing_errors_doc)] + pub fn from_client(client: GenericClient, keyfile: Option<&Path>) -> Result { let store = MnemonicStore::new(keyfile)?; let mnemonic = match keyfile { Some(_) => store.get_or_generate_mnemonic()?, @@ -61,25 +64,17 @@ impl Wallet { .bip44_account(client.config().algorithm, client.config().coin, 0)? .derive(ChildNumber::non_hardened_from_u32(0))? } else { - signer.master_key(client.config().algorithm)?.clone() + signer.master_key(client.config().algorithm).clone() }; let public_key = secret_key.public_key(); - let account = public_key - .to_address(client.config().address_format) - .to_rosetta(); + let account = public_key.to_address(client.config().address_format).to_rosetta(); let public_key = public_key.to_rosetta(); if public_key.curve_type != client.config().algorithm.to_curve_type() { anyhow::bail!("The signer and client curve type aren't compatible.") } - Ok(Self { - client, - account, - secret_key, - public_key, - tx, - }) + Ok(Self { client, account, secret_key, public_key, tx }) } /// Returns the blockchain config. @@ -88,27 +83,27 @@ impl Wallet { } /// Returns the public key. - pub fn public_key(&self) -> &PublicKey { + pub const fn public_key(&self) -> &PublicKey { &self.public_key } /// Returns the account identifier. - pub fn account(&self) -> &AccountIdentifier { + pub const fn account(&self) -> &AccountIdentifier { &self.account } /// Returns the latest finalized block identifier. + #[allow(clippy::missing_errors_doc)] pub async fn status(&self) -> Result { self.client.finalized_block().await } /// Returns the balance of the wallet. + #[allow(clippy::missing_errors_doc)] pub async fn balance(&self) -> Result { let block = self.client.current_block().await?; - let address = Address::new( - self.client.config().address_format, - self.account.address.clone(), - ); + let address = + Address::new(self.client.config().address_format, self.account.address.clone()); let balance = self.client.balance(&address, &block).await?; Ok(Amount { value: format!("{balance}"), @@ -118,6 +113,7 @@ impl Wallet { } /// Return a stream of events, return None if the blockchain doesn't support events. 
+ #[allow(clippy::missing_errors_doc)] pub async fn listen( &self, ) -> Result::EventStream<'_>>> { @@ -125,28 +121,29 @@ impl Wallet { } /// Returns block data - /// Takes PartialBlockIdentifier + /// Takes `PartialBlockIdentifier` + #[allow(clippy::missing_errors_doc)] pub async fn block(&self, data: PartialBlockIdentifier) -> Result { self.client.block(&data).await } /// Returns transactions included in a block /// Parameters: - /// 1. block_identifier: BlockIdentifier containing block number and hash - /// 2. tx_identifier: TransactionIdentifier containing hash of transaction + /// 1. `block_identifier`: `BlockIdentifier` containing block number and hash + /// 2. `tx_identifier`: `TransactionIdentifier` containing hash of transaction + #[allow(clippy::missing_errors_doc)] pub async fn block_transaction( &self, block_identifer: BlockIdentifier, tx_identifier: TransactionIdentifier, ) -> Result { - self.client - .block_transaction(&block_identifer, &tx_identifier) - .await + self.client.block_transaction(&block_identifer, &tx_identifier).await } /// Extension of rosetta-api does multiple things /// 1. fetching storage /// 2. calling extrinsic/contract + #[allow(clippy::missing_errors_doc)] async fn call( &self, method: String, @@ -163,18 +160,18 @@ impl Wallet { } /// Returns the coins of the wallet. + #[allow(clippy::missing_errors_doc)] pub async fn coins(&self) -> Result> { let block = self.client.current_block().await?; - let address = Address::new( - self.client.config().address_format, - self.account.address.clone(), - ); + let address = + Address::new(self.client.config().address_format, self.account.address.clone()); self.client.coins(&address, &block).await } /// Returns the on chain metadata. /// Parameters: - /// - metadata_params: the metadata parameters which we got from transaction builder. + /// - `metadata_params`: the metadata parameters which we got from transaction builder. + #[allow(clippy::missing_errors_doc)] pub async fn metadata( &self, metadata_params: &GenericMetadataParams, @@ -190,11 +187,13 @@ impl Wallet { /// Submits a transaction and returns the transaction identifier. /// Parameters: /// - transaction: the transaction bytes to submit + #[allow(clippy::missing_errors_doc)] pub async fn submit(&self, transaction: &[u8]) -> Result> { self.client.submit(transaction).await } /// Creates, signs and submits a transaction. + #[allow(clippy::missing_errors_doc)] pub async fn construct(&self, params: &GenericMetadataParams) -> Result> { let metadata = self.metadata(params).await?; let transaction = self.tx.create_and_sign( @@ -210,6 +209,7 @@ impl Wallet { /// Parameters: /// - account: the account to transfer to /// - amount: the amount to transfer + #[allow(clippy::missing_errors_doc)] pub async fn transfer(&self, account: &AccountIdentifier, amount: u128) -> Result> { let address = Address::new(self.client.config().address_format, account.address.clone()); let metadata_params = self.tx.transfer(&address, amount)?; @@ -218,22 +218,23 @@ impl Wallet { /// Uses the faucet on dev chains to seed the account with funds. 
/// Parameters: - /// - faucet_parameter: the amount to seed the account with + /// - `faucet_parameter`: the amount to seed the account with + #[allow(clippy::missing_errors_doc)] pub async fn faucet(&self, faucet_parameter: u128) -> Result> { - let address = Address::new( - self.client.config().address_format, - self.account.address.clone(), - ); + let address = + Address::new(self.client.config().address_format, self.account.address.clone()); self.client.faucet(&address, faucet_parameter).await } /// deploys contract to chain + #[allow(clippy::missing_errors_doc)] pub async fn eth_deploy_contract(&self, bytecode: Vec) -> Result> { let metadata_params = self.tx.deploy_contract(bytecode)?; self.construct(&metadata_params).await } /// calls contract send call function + #[allow(clippy::missing_errors_doc)] pub async fn eth_send_call( &self, contract_address: &str, @@ -242,12 +243,12 @@ impl Wallet { amount: u128, ) -> Result> { let metadata_params = - self.tx - .method_call(contract_address, method_signature, params, amount)?; + self.tx.method_call(contract_address, method_signature, params, amount)?; self.construct(&metadata_params).await } /// estimates gas of send call + #[allow(clippy::missing_errors_doc)] pub async fn eth_send_call_estimate_gas( &self, contract_address: &str, @@ -256,8 +257,7 @@ impl Wallet { amount: u128, ) -> Result { let metadata_params = - self.tx - .method_call(contract_address, method_signature, params, amount)?; + self.tx.method_call(contract_address, method_signature, params, amount)?; let metadata = match self.metadata(&metadata_params).await? { GenericMetadata::Ethereum(metadata) => metadata, GenericMetadata::Astar(metadata) => metadata.0, @@ -267,6 +267,7 @@ impl Wallet { } /// calls a contract view call function + #[allow(clippy::missing_errors_doc)] pub async fn eth_view_call( &self, contract_address: &str, @@ -274,33 +275,36 @@ impl Wallet { params: &[String], block_identifier: Option, ) -> Result { - let method = format!("{}-{}-call", contract_address, method_signature); + let method = format!("{contract_address}-{method_signature}-call"); self.call(method, &json!(params), block_identifier).await } /// gets storage from ethereum contract + #[allow(clippy::missing_errors_doc)] pub async fn eth_storage( &self, contract_address: &str, storage_slot: &str, block_identifier: Option, ) -> Result { - let method = format!("{}-{}-storage", contract_address, storage_slot); + let method = format!("{contract_address}-{storage_slot}-storage"); self.call(method, &json!({}), block_identifier).await } /// gets storage proof from ethereum contract + #[allow(clippy::missing_errors_doc)] pub async fn eth_storage_proof( &self, contract_address: &str, storage_slot: &str, block_identifier: Option, ) -> Result { - let method = format!("{}-{}-storage_proof", contract_address, storage_slot); + let method = format!("{contract_address}-{storage_slot}-storage_proof"); self.call(method, &json!({}), block_identifier).await } /// gets transaction receipt of specific hash + #[allow(clippy::missing_errors_doc)] pub async fn eth_transaction_receipt(&self, tx_hash: &[u8]) -> Result { let call_method = format!("{}--transaction_receipt", hex::encode(tx_hash)); self.call(call_method, &json!({}), None).await diff --git a/rosetta-core/src/lib.rs b/rosetta-core/src/lib.rs index 25bba5a9..7f1b560c 100644 --- a/rosetta-core/src/lib.rs +++ b/rosetta-core/src/lib.rs @@ -1,17 +1,19 @@ mod node_uri; -use crate::crypto::address::{Address, AddressFormat}; -use crate::crypto::{Algorithm, PublicKey, 
SecretKey}; -use crate::types::{ - Block, BlockIdentifier, CallRequest, Coin, Currency, CurveType, NetworkIdentifier, - PartialBlockIdentifier, SignatureType, Transaction, TransactionIdentifier, +use crate::{ + crypto::{ + address::{Address, AddressFormat}, + Algorithm, PublicKey, SecretKey, + }, + types::{ + Block, BlockIdentifier, CallRequest, Coin, Currency, CurveType, NetworkIdentifier, + PartialBlockIdentifier, SignatureType, Transaction, TransactionIdentifier, + }, }; use anyhow::Result; use async_trait::async_trait; -pub use futures_util::future; -pub use futures_util::stream; -use serde::de::DeserializeOwned; -use serde::Serialize; +pub use futures_util::{future, stream}; +use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use std::sync::Arc; @@ -44,6 +46,7 @@ pub struct BlockchainConfig { } impl BlockchainConfig { + #[must_use] pub fn network(&self) -> NetworkIdentifier { NetworkIdentifier { blockchain: self.blockchain.into(), @@ -52,6 +55,7 @@ impl BlockchainConfig { } } + #[must_use] pub fn currency(&self) -> Currency { Currency { symbol: self.currency_symbol.into(), @@ -60,10 +64,12 @@ impl BlockchainConfig { } } + #[must_use] pub fn node_url(&self) -> String { self.node_uri.with_host("rosetta.analog.one").to_string() } + #[must_use] pub fn connector_url(&self) -> String { format!("http://rosetta.analog.one:{}", self.connector_port) } @@ -135,56 +141,56 @@ where type EventStream<'a> = ::EventStream<'a>; fn config(&self) -> &BlockchainConfig { - BlockchainClient::config(Arc::as_ref(self)) + BlockchainClient::config(Self::as_ref(self)) } fn genesis_block(&self) -> &BlockIdentifier { - BlockchainClient::genesis_block(Arc::as_ref(self)) + BlockchainClient::genesis_block(Self::as_ref(self)) } async fn node_version(&self) -> Result { - BlockchainClient::node_version(Arc::as_ref(self)).await + BlockchainClient::node_version(Self::as_ref(self)).await } async fn current_block(&self) -> Result { - BlockchainClient::current_block(Arc::as_ref(self)).await + BlockchainClient::current_block(Self::as_ref(self)).await } async fn finalized_block(&self) -> Result { - BlockchainClient::finalized_block(Arc::as_ref(self)).await + BlockchainClient::finalized_block(Self::as_ref(self)).await } async fn balance(&self, address: &Address, block: &BlockIdentifier) -> Result { - BlockchainClient::balance(Arc::as_ref(self), address, block).await + BlockchainClient::balance(Self::as_ref(self), address, block).await } async fn coins(&self, address: &Address, block: &BlockIdentifier) -> Result> { - BlockchainClient::coins(Arc::as_ref(self), address, block).await + BlockchainClient::coins(Self::as_ref(self), address, block).await } async fn faucet(&self, address: &Address, param: u128) -> Result> { - BlockchainClient::faucet(Arc::as_ref(self), address, param).await + BlockchainClient::faucet(Self::as_ref(self), address, param).await } async fn metadata( &self, public_key: &PublicKey, params: &Self::MetadataParams, ) -> Result { - BlockchainClient::metadata(Arc::as_ref(self), public_key, params).await + BlockchainClient::metadata(Self::as_ref(self), public_key, params).await } async fn submit(&self, transaction: &[u8]) -> Result> { - BlockchainClient::submit(Arc::as_ref(self), transaction).await + BlockchainClient::submit(Self::as_ref(self), transaction).await } async fn block(&self, block: &PartialBlockIdentifier) -> Result { - BlockchainClient::block(Arc::as_ref(self), block).await + BlockchainClient::block(Self::as_ref(self), block).await } async fn block_transaction( &self, block: 
&BlockIdentifier, tx: &TransactionIdentifier, ) -> Result { - BlockchainClient::block_transaction(Arc::as_ref(self), block, tx).await + BlockchainClient::block_transaction(Self::as_ref(self), block, tx).await } async fn call(&self, req: &CallRequest) -> Result { - BlockchainClient::call(Arc::as_ref(self), req).await + BlockchainClient::call(Self::as_ref(self), req).await } /// Return a stream of events, return None if the blockchain doesn't support events. async fn listen<'a>(&'a self) -> Result>> { - BlockchainClient::listen(Arc::as_ref(self)).await + BlockchainClient::listen(Self::as_ref(self)).await } } @@ -196,21 +202,19 @@ pub trait RosettaAlgorithm { impl RosettaAlgorithm for Algorithm { fn to_signature_type(self) -> SignatureType { match self { - Algorithm::EcdsaSecp256k1 => SignatureType::Ecdsa, - Algorithm::EcdsaRecoverableSecp256k1 => SignatureType::EcdsaRecovery, - Algorithm::EcdsaSecp256r1 => SignatureType::Ecdsa, - Algorithm::Ed25519 => SignatureType::Ed25519, - Algorithm::Sr25519 => SignatureType::Sr25519, + Self::EcdsaSecp256k1 | Self::EcdsaSecp256r1 => SignatureType::Ecdsa, + Self::EcdsaRecoverableSecp256k1 => SignatureType::EcdsaRecovery, + Self::Ed25519 => SignatureType::Ed25519, + Self::Sr25519 => SignatureType::Sr25519, } } fn to_curve_type(self) -> CurveType { match self { - Algorithm::EcdsaSecp256k1 => CurveType::Secp256k1, - Algorithm::EcdsaRecoverableSecp256k1 => CurveType::Secp256k1, - Algorithm::EcdsaSecp256r1 => CurveType::Secp256r1, - Algorithm::Ed25519 => CurveType::Edwards25519, - Algorithm::Sr25519 => CurveType::Schnorrkel, + Self::EcdsaSecp256k1 | Self::EcdsaRecoverableSecp256k1 => CurveType::Secp256k1, + Self::EcdsaSecp256r1 => CurveType::Secp256r1, + Self::Ed25519 => CurveType::Edwards25519, + Self::Sr25519 => CurveType::Schnorrkel, } } } @@ -219,8 +223,16 @@ pub trait TransactionBuilder: Default + Sized { type MetadataParams: Serialize + Clone; type Metadata: DeserializeOwned + Sized + Send + Sync + 'static; + /// Returns the transfer metadata parameters + /// + /// # Errors + /// Returns `Err` if for some reason it cannot construct the metadata parameters. fn transfer(&self, address: &Address, amount: u128) -> Result; + /// Returns the call metadata parameters + /// + /// # Errors + /// Returns `Err` if for some reason it cannot construct the metadata parameters. fn method_call( &self, contract: &str, @@ -229,6 +241,10 @@ pub trait TransactionBuilder: Default + Sized { amount: u128, ) -> Result; + /// Retrieve the metadata parameters for deploying a smart-contract + /// + /// # Errors + /// Returns `Err` if for some reason it cannot construct the metadata parameters. fn deploy_contract(&self, contract_binary: Vec) -> Result; fn create_and_sign( diff --git a/rosetta-core/src/node_uri.rs b/rosetta-core/src/node_uri.rs index 1cd82630..c96f2259 100644 --- a/rosetta-core/src/node_uri.rs +++ b/rosetta-core/src/node_uri.rs @@ -2,7 +2,7 @@ use fluent_uri::Uri; use std::fmt::{Display, Formatter}; use thiserror::Error; -#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error, PartialEq, Eq)] pub enum NodeUriError { #[error("Invalid uri")] InvalidUri, @@ -22,7 +22,7 @@ pub enum NodeUriError { /// | | | | | | | /// scheme userinfo host port path query fragment /// ``` -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct NodeUri<'a> { pub scheme: &'a str, pub userinfo: Option<&'a str>, @@ -34,13 +34,21 @@ pub struct NodeUri<'a> { } impl<'a> NodeUri<'a> { + /// Parses a URI reference from a byte sequence into a Uri<&str>. 
+ /// This function validates the input strictly except that UTF-8 validation is not performed on + /// a percent-encoded registered name (see Section 3.2.2, RFC 3986 ). Care should be taken when + /// dealing with such cases. # Errors + /// + /// # Errors + /// The provided url must contain [`fluent_uri::Scheme`], [`fluent_uri::Host`] and port, + /// otherwise returns `Err` pub fn parse(s: &'a str) -> Result { let uri = Uri::parse(s).map_err(|_| NodeUriError::InvalidUri)?; // Parse uri components. let scheme = uri .scheme() - .map(|scheme| scheme.as_str()) + .map(fluent_uri::Scheme::as_str) .ok_or(NodeUriError::InvalidScheme)?; if scheme.is_empty() { return Err(NodeUriError::InvalidScheme); @@ -55,7 +63,7 @@ impl<'a> NodeUri<'a> { .ok_or(NodeUriError::InvalidPort)? .parse::() .map_err(|_| NodeUriError::InvalidPort)?; - let userinfo = authority.userinfo().map(|userinfo| userinfo.as_str()); + let userinfo = authority.userinfo().map(fluent_uri::enc::EStr::as_str); Ok(Self { scheme, @@ -63,12 +71,13 @@ impl<'a> NodeUri<'a> { host, port, path: uri.path().as_str(), - query: uri.query().map(|query| query.as_str()), - fragment: uri.fragment().map(|fragment| fragment.as_str()), + query: uri.query().map(fluent_uri::enc::EStr::as_str), + fragment: uri.fragment().map(fluent_uri::enc::EStr::as_str), }) } - pub fn with_host<'b, 'c: 'b>(&'b self, host: &'c str) -> NodeUri<'b> { + #[must_use] + pub const fn with_host<'b, 'c: 'b>(&'b self, host: &'c str) -> NodeUri<'b> { NodeUri { scheme: self.scheme, userinfo: self.userinfo, @@ -80,7 +89,8 @@ impl<'a> NodeUri<'a> { } } - pub fn with_scheme<'b, 'c: 'b>(&'b self, scheme: &'c str) -> NodeUri<'b> { + #[must_use] + pub const fn with_scheme<'b, 'c: 'b>(&'b self, scheme: &'c str) -> NodeUri<'b> { NodeUri { scheme, userinfo: self.userinfo, @@ -100,7 +110,7 @@ impl Display for NodeUri<'_> { // userinfo@ if let Some(userinfo) = self.userinfo { - write!(f, "{}@", userinfo)?; + write!(f, "{userinfo}@")?; } // host:port/path @@ -108,12 +118,12 @@ impl Display for NodeUri<'_> { // ?query if let Some(query) = self.query { - write!(f, "?{}", query)?; + write!(f, "?{query}")?; } // #fragment if let Some(fragment) = self.fragment { - write!(f, "#{}", fragment)?; + write!(f, "#{fragment}")?; } Ok(()) } diff --git a/rosetta-crypto/src/address/bech32.rs b/rosetta-crypto/src/address/bech32.rs index 4782ad87..87969360 100644 --- a/rosetta-crypto/src/address/bech32.rs +++ b/rosetta-crypto/src/address/bech32.rs @@ -1,6 +1,7 @@ use bech32::{u5, ToBase32, Variant}; use sha2::Digest; +#[allow(clippy::unwrap_used)] pub fn bech32_encode(hrp: &str, public_key: &[u8]) -> String { let sha2 = sha2::Sha256::digest(public_key); let ripemd = ripemd::Ripemd160::digest(sha2); diff --git a/rosetta-crypto/src/address/eip55.rs b/rosetta-crypto/src/address/eip55.rs index a0fd0e94..61eb220a 100644 --- a/rosetta-crypto/src/address/eip55.rs +++ b/rosetta-crypto/src/address/eip55.rs @@ -1,6 +1,7 @@ use crate::{Algorithm, PublicKey}; use sha3::Digest; +#[allow(clippy::unwrap_used)] pub fn eip55_encode(public_key: &[u8]) -> String { let uncompressed = PublicKey::from_bytes(Algorithm::EcdsaSecp256k1, public_key) .unwrap() @@ -9,6 +10,7 @@ pub fn eip55_encode(public_key: &[u8]) -> String { eip55_encode_bytes(&digest[12..]) } +#[allow(clippy::unwrap_used)] fn eip55_encode_bytes(bytes: &[u8]) -> String { let address = hex::encode(bytes); let hashed_address = hex::encode(sha3::Keccak256::digest(&address)); @@ -18,7 +20,7 @@ fn eip55_encode_bytes(bytes: &[u8]) -> String { if character.is_alphabetic() { let 
nibble = hashed_address.as_bytes()[nibble_index] as char; if nibble.to_digit(16).unwrap() > 7 { - character = character.to_ascii_uppercase() + character = character.to_ascii_uppercase(); } } result.push(character); diff --git a/rosetta-crypto/src/address/mod.rs b/rosetta-crypto/src/address/mod.rs index 6d4a7d50..56ad793f 100644 --- a/rosetta-crypto/src/address/mod.rs +++ b/rosetta-crypto/src/address/mod.rs @@ -1,7 +1,5 @@ //! Support for various blockchain address formats. -use crate::bip32::DerivedPublicKey; -use crate::error::AddressError; -use crate::PublicKey; +use crate::{bip32::DerivedPublicKey, error::AddressError, PublicKey}; use ethers::types::H160; use sp_core::{ crypto::{AccountId32, Ss58Codec}, @@ -46,11 +44,13 @@ pub struct Address { impl Address { /// Creates a new address. - pub fn new(format: AddressFormat, address: String) -> Self { + #[must_use] + pub const fn new(format: AddressFormat, address: String) -> Self { Self { format, address } } /// Formats the public key as an address. + #[must_use] pub fn from_public_key_bytes(format: AddressFormat, public_key: &[u8]) -> Self { let address = match format { AddressFormat::Bech32(hrp) => bech32::bech32_encode(hrp, public_key), @@ -61,15 +61,17 @@ impl Address { } /// Converts an EVM address to its corresponding SS58 address. - /// reference: https://github.com/polkadot-js/common/blob/v12.3.2/packages/util-crypto/src/address/evmToAddress.ts + /// reference: [evmToAddress.ts](https://github.com/polkadot-js/common/blob/v12.3.2/packages/util-crypto/src/address/evmToAddress.ts) + /// + /// # Errors + /// + /// Will return `Err` when `self.address` is not a valid 160bit hex string pub fn evm_to_ss58(&self, ss58format: Ss58AddressFormat) -> Result { if self.format != AddressFormat::Eip55 { return Err(AddressError::InvalidAddressFormat); } - let address: H160 = self - .address - .parse() - .map_err(|_| AddressError::FailedToDecodeAddress)?; + let address: H160 = + self.address.parse().map_err(|_| AddressError::FailedToDecodeAddress)?; let mut data = [0u8; 24]; data[0..4].copy_from_slice(b"evm:"); data[4..24].copy_from_slice(&address[..]); @@ -81,7 +83,12 @@ impl Address { } /// Converts an SS58 address to its corresponding EVM address. - /// reference: https://github.com/polkadot-js/common/blob/v12.3.2/packages/util-crypto/src/address/addressToEvm.ts#L13 + /// reference: [addressToEvm.ts](https://github.com/polkadot-js/common/blob/v12.3.2/packages/util-crypto/src/address/addressToEvm.ts#L13) + /// + /// # Errors + /// Will return `Err` when: + /// * self.format is not [`AddressFormat::Ss58`] + /// * self.address is not a valid SS58 address string pub fn ss58_to_evm(&self) -> Result { if !matches!(self.format, AddressFormat::Ss58(_)) { return Err(AddressError::InvalidAddressFormat); @@ -89,18 +96,17 @@ impl Address { let ss58_addr = ::from_string(&self.address) .map_err(|_| AddressError::FailedToDecodeAddress)?; let bytes: [u8; 32] = ss58_addr.into(); - Ok(Self { - format: AddressFormat::Eip55, - address: hex::encode(&bytes[0..20]), - }) + Ok(Self { format: AddressFormat::Eip55, address: hex::encode(&bytes[0..20]) }) } /// Returns the format of the address. - pub fn format(&self) -> AddressFormat { + #[must_use] + pub const fn format(&self) -> AddressFormat { self.format } /// Returns the address. + #[must_use] pub fn address(&self) -> &str { &self.address } @@ -114,6 +120,7 @@ impl From
<Address>
for String { impl PublicKey { /// Returns the address of a public key. + #[must_use] pub fn to_address(&self, format: AddressFormat) -> Address { Address::from_public_key_bytes(format, &self.to_bytes()) } @@ -121,6 +128,7 @@ impl PublicKey { impl DerivedPublicKey { /// Returns the address of a public key. + #[must_use] pub fn to_address(&self, format: AddressFormat) -> Address { Address::from_public_key_bytes(format, &self.public_key().to_bytes()) } diff --git a/rosetta-crypto/src/address/ss58.rs b/rosetta-crypto/src/address/ss58.rs index a384b29b..aaf91fe7 100644 --- a/rosetta-crypto/src/address/ss58.rs +++ b/rosetta-crypto/src/address/ss58.rs @@ -5,15 +5,20 @@ pub fn ss58_encode(address_format: Ss58AddressFormat, public_key: &[u8]) -> Stri // We mask out the upper two bits of the ident - SS58 Prefix currently only supports 14-bits let ident: u16 = u16::from(address_format) & 0b0011_1111_1111_1111; let mut v = match ident { - 0..=63 => vec![ident as u8], + 0..=63 => { + // The value will not truncate once is between 0 and 63 + #[allow(clippy::cast_possible_truncation)] + let ident = ident as u8; + vec![ident] + }, 64..=16_383 => { // upper six bits of the lower byte(!) let first = ((ident & 0b0000_0000_1111_1100) as u8) >> 2; // lower two bits of the lower byte in the high pos, // lower bits of the upper byte in the low pos let second = ((ident >> 8) as u8) | ((ident & 0b0000_0000_0000_0011) as u8) << 6; - vec![first | 0b01000000, second] - } + vec![first | 0b0100_0000, second] + }, _ => unreachable!("masked out the upper two bits; qed"), }; v.extend(public_key); diff --git a/rosetta-crypto/src/bip32.rs b/rosetta-crypto/src/bip32.rs index 2fc52821..75f81a76 100644 --- a/rosetta-crypto/src/bip32.rs +++ b/rosetta-crypto/src/bip32.rs @@ -1,7 +1,5 @@ //! BIP32 implementation. -use crate::bip39::Mnemonic; -use crate::bip44::ChildNumber; -use crate::{Algorithm, PublicKey, SecretKey}; +use crate::{bip39::Mnemonic, bip44::ChildNumber, Algorithm, PublicKey, SecretKey}; use anyhow::Result; use hmac::{Hmac, Mac}; use sha2::Sha512; @@ -9,39 +7,33 @@ use sha2::Sha512; impl Algorithm { /// If the algorithm supports BIP32. ECDSA and Ed25519 do, but schnorrkel /// uses it's own hierarchical key derivation algorithm. - fn supports_bip32(self) -> bool { - !matches!(self, Algorithm::Sr25519) + const fn supports_bip32(self) -> bool { + !matches!(self, Self::Sr25519) } /// If the algorithm supports soft key derivations. ECDSA and schnorrkel /// do, but ed25519 does not. - fn supports_non_hardened_derivation(self) -> bool { - !matches!(self, Algorithm::Ed25519) + const fn supports_non_hardened_derivation(self) -> bool { + !matches!(self, Self::Ed25519) } /// BIP32 defines a retry procedure for secp256k1. - fn uses_bip32_retry(self) -> bool { - matches!( - self, - Algorithm::EcdsaSecp256k1 | Algorithm::EcdsaRecoverableSecp256k1 - ) + const fn uses_bip32_retry(self) -> bool { + matches!(self, Self::EcdsaSecp256k1 | Self::EcdsaRecoverableSecp256k1) } /// SLIP0010 defines a retry procedure for secp256r1. 
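// Illustrative sketch, not part of this patch: the SS58 prefix encoding that the
// `ss58_encode` hunk above implements, kept here as a standalone helper so the bit
// layout is easier to follow. The ident is masked to 14 bits; 0..=63 fits in one
// byte, 64..=16_383 is spread across two bytes.
fn ss58_prefix_bytes(ident: u16) -> Vec<u8> {
    let ident = ident & 0b0011_1111_1111_1111; // SS58 prefixes only support 14 bits
    match ident {
        0..=63 => vec![ident as u8],
        64..=16_383 => {
            // upper six bits of the lower byte, tagged with 0b01 to mark the two-byte form
            let first = ((ident & 0b0000_0000_1111_1100) as u8) >> 2;
            // lower two bits of the lower byte in the high position,
            // lower bits of the upper byte in the low position
            let second = ((ident >> 8) as u8) | ((ident & 0b0000_0000_0000_0011) as u8) << 6;
            vec![first | 0b0100_0000, second]
        },
        _ => unreachable!("masked out the upper two bits above"),
    }
}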
- fn uses_slip10_retry(self) -> bool { - matches!(self, Algorithm::EcdsaSecp256r1) + const fn uses_slip10_retry(self) -> bool { + matches!(self, Self::EcdsaSecp256r1) } } impl SecretKey { - fn tweak_add(&self, secret_key: &SecretKey) -> Result> { + fn tweak_add(&self, secret_key: &Self) -> Result> { use ecdsa::elliptic_curve::NonZeroScalar; match (self, secret_key) { - (SecretKey::EcdsaSecp256k1(secret), SecretKey::EcdsaSecp256k1(secret2)) - | ( - SecretKey::EcdsaRecoverableSecp256k1(secret), - SecretKey::EcdsaRecoverableSecp256k1(secret2), - ) => { + (Self::EcdsaSecp256k1(secret), Self::EcdsaSecp256k1(secret2)) | + (Self::EcdsaRecoverableSecp256k1(secret), Self::EcdsaRecoverableSecp256k1(secret2)) => { let scalar = secret.as_nonzero_scalar().as_ref(); let tweak = secret2.as_nonzero_scalar().as_ref(); let scalar: Option> = @@ -51,23 +43,21 @@ impl SecretKey { None => return Ok(None), }; Ok(Some(if self.algorithm().is_recoverable() { - SecretKey::EcdsaRecoverableSecp256k1(signing_key) + Self::EcdsaRecoverableSecp256k1(signing_key) } else { - SecretKey::EcdsaSecp256k1(signing_key) + Self::EcdsaSecp256k1(signing_key) })) - } - (SecretKey::EcdsaSecp256r1(secret), SecretKey::EcdsaSecp256r1(secret2)) => { + }, + (Self::EcdsaSecp256r1(secret), Self::EcdsaSecp256r1(secret2)) => { let scalar = secret.as_nonzero_scalar().as_ref(); let tweak = secret2.as_nonzero_scalar().as_ref(); let scalar: Option> = Option::from(NonZeroScalar::new(scalar + tweak)); - match scalar { - Some(scalar) => Ok(Some(SecretKey::EcdsaSecp256r1(ecdsa::SigningKey::from( - scalar, - )))), - None => Ok(None), - } - } + scalar.map_or_else( + || Ok(None), + |scalar| Ok(Some(Self::EcdsaSecp256r1(ecdsa::SigningKey::from(scalar)))), + ) + }, _ => anyhow::bail!("unsupported key type"), } } @@ -76,27 +66,26 @@ impl SecretKey { impl PublicKey { fn tweak_add(&self, tweak: [u8; 32]) -> Result> { match self { - PublicKey::EcdsaSecp256k1(public) | PublicKey::EcdsaRecoverableSecp256k1(public) => { - Ok((|| { - let parent_key = k256::ProjectivePoint::from(public.as_affine()); - let tweak = k256::NonZeroScalar::try_from(&tweak[..]).ok()?; - let mut tweak_point = k256::ProjectivePoint::GENERATOR * tweak.as_ref(); - tweak_point += parent_key; - let public = ecdsa::VerifyingKey::from_affine(tweak_point.to_affine()).ok()?; - Some(if self.algorithm().is_recoverable() { - PublicKey::EcdsaRecoverableSecp256k1(public) - } else { - PublicKey::EcdsaSecp256k1(public) - }) - })()) - } - PublicKey::EcdsaSecp256r1(public) => Ok((|| { + Self::EcdsaSecp256k1(public) | Self::EcdsaRecoverableSecp256k1(public) => Ok((|| { + let parent_key = k256::ProjectivePoint::from(public.as_affine()); + let tweak = k256::NonZeroScalar::try_from(&tweak[..]).ok()?; + let mut tweak_point = k256::ProjectivePoint::GENERATOR * tweak.as_ref(); + tweak_point += parent_key; + let public = ecdsa::VerifyingKey::from_affine(tweak_point.to_affine()).ok()?; + Some(if self.algorithm().is_recoverable() { + Self::EcdsaRecoverableSecp256k1(public) + } else { + Self::EcdsaSecp256k1(public) + }) + })( + )), + Self::EcdsaSecp256r1(public) => Ok((|| { let parent_key = p256::ProjectivePoint::from(public.as_affine()); let tweak = p256::NonZeroScalar::try_from(&tweak[..]).ok()?; let mut tweak_point = p256::ProjectivePoint::GENERATOR * tweak.as_ref(); tweak_point += parent_key; let public = ecdsa::VerifyingKey::from_affine(tweak_point.to_affine()).ok()?; - Some(PublicKey::EcdsaSecp256r1(public)) + Some(Self::EcdsaSecp256r1(public)) })()), _ => anyhow::bail!("unsupported key type"), } @@ -112,6 
+101,10 @@ pub struct DerivedSecretKey { impl DerivedSecretKey { /// Derives a master key from a mnemonic. + /// + /// # Errors + /// + /// Will return `Err` when the chain code is invalid pub fn new(mnemonic: &Mnemonic, password: &str, algorithm: Algorithm) -> Result { if algorithm == Algorithm::Sr25519 { Self::substrate(mnemonic, password, algorithm) @@ -121,6 +114,10 @@ impl DerivedSecretKey { } /// Derives a master key and chain code from a mnemonic using BIP39. + /// + /// # Errors + /// + /// Will return `Err` when the chain code is invalid pub fn bip39(mnemonic: &Mnemonic, password: &str, algorithm: Algorithm) -> Result { let seed = mnemonic.to_seed(password); Self::bip32_master_key(&seed[..], algorithm) @@ -129,8 +126,9 @@ impl DerivedSecretKey { /// Derives a BIP32 master key. See SLIP0010 for extension to secp256r1 and ed25519 curves. fn bip32_master_key(seed: &[u8], algorithm: Algorithm) -> Result { let curve_name = match algorithm { - Algorithm::EcdsaSecp256k1 => &b"Bitcoin seed"[..], - Algorithm::EcdsaRecoverableSecp256k1 => &b"Bitcoin seed"[..], + Algorithm::EcdsaRecoverableSecp256k1 | Algorithm::EcdsaSecp256k1 => { + &b"Bitcoin seed"[..] + }, Algorithm::EcdsaSecp256r1 => &b"Nist256p1 seed"[..], Algorithm::Ed25519 => &b"ed25519 seed"[..], Algorithm::Sr25519 => anyhow::bail!("sr25519 does not support bip32 derivation"), @@ -145,24 +143,27 @@ impl DerivedSecretKey { } let result = hmac.finalize().into_bytes(); let (secret_key, chain_code) = result.split_at(32); - let secret_key = if let Ok(secret_key) = SecretKey::from_bytes(algorithm, secret_key) { - secret_key - } else if algorithm.uses_slip10_retry() { - retry = Some(result.into()); - continue; - } else { - anyhow::bail!("failed to derive a valid secret key"); + let secret_key = match SecretKey::from_bytes(algorithm, secret_key) { + Ok(secret_key) => secret_key, + _ if algorithm.uses_slip10_retry() => { + retry = Some(result.into()); + continue; + }, + _ => { + anyhow::bail!("failed to derive a valid secret key"); + }, }; - return Ok(Self { - secret_key, - chain_code: chain_code.try_into()?, - }); + return Ok(Self { secret_key, chain_code: chain_code.try_into()? }); } } /// Derives a master key and chain code from a mnemonic. This avoids the complex BIP39 /// seed generation algorithm which was intended to support brain wallets. Instead it /// uses pbkdf2 using the entropy as the key and password as the salt. + /// + /// # Errors + /// + /// Only supports [`Algorithm::Ed25519`] and [`Algorithm::Sr25519`], otherwise returns `Err` pub fn substrate(mnemonic: &Mnemonic, password: &str, algorithm: Algorithm) -> Result { let (entropy, len) = mnemonic.to_entropy_array(); let seed = substrate_bip39::seed_from_entropy(&entropy[..len], password) @@ -176,16 +177,19 @@ impl DerivedSecretKey { } /// The secret key used to sign messages. - pub fn secret_key(&self) -> &SecretKey { + #[must_use] + pub const fn secret_key(&self) -> &SecretKey { &self.secret_key } /// The chain code used to derive child keys. - pub fn chain_code(&self) -> &[u8; 32] { + #[must_use] + pub const fn chain_code(&self) -> &[u8; 32] { &self.chain_code } /// Returns the derived public key used for verifying signatures. 
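// Illustrative sketch, not part of this patch: the BIP32/SLIP-0010 master-key split
// performed by `bip32_master_key` above. HMAC-SHA512 keyed with the curve name
// ("Bitcoin seed", "Nist256p1 seed" or "ed25519 seed") over the seed yields 64 bytes;
// the first 32 are the candidate secret key, the last 32 are the chain code.
#[allow(clippy::expect_used)]
fn split_master_key_material(curve_name: &[u8], seed: &[u8]) -> ([u8; 32], [u8; 32]) {
    use hmac::{Hmac, Mac};
    use sha2::Sha512;

    let mut hmac = Hmac::<Sha512>::new_from_slice(curve_name).expect("HMAC accepts any key length");
    hmac.update(seed);
    let bytes = hmac.finalize().into_bytes();

    let mut secret_key = [0u8; 32];
    let mut chain_code = [0u8; 32];
    secret_key.copy_from_slice(&bytes[..32]);
    chain_code.copy_from_slice(&bytes[32..]);
    (secret_key, chain_code)
}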
+ #[must_use] pub fn public_key(&self) -> DerivedPublicKey { DerivedPublicKey::new(self.secret_key.public_key(), self.chain_code) } @@ -218,16 +222,14 @@ impl DerivedSecretKey { let chain_code: [u8; 32] = chain_code.try_into()?; retry = Some(chain_code); - let mut secret_key = - if let Ok(secret_key) = SecretKey::from_bytes(algorithm, secret_key) { - secret_key - } else if algorithm.uses_slip10_retry() { - continue; - } else if algorithm.uses_bip32_retry() { - return self.bip32_derive(child + 1); - } else { + let mut secret_key = match SecretKey::from_bytes(algorithm, secret_key) { + Ok(secret_key) => secret_key, + _ if algorithm.uses_slip10_retry() => continue, + _ if algorithm.uses_bip32_retry() => return self.bip32_derive(child + 1), + _ => { anyhow::bail!("failed to derive a valid secret key"); - }; + }, + }; if algorithm.supports_non_hardened_derivation() { if let Some(tweaked_secret_key) = secret_key.tweak_add(&self.secret_key)? { @@ -241,14 +243,16 @@ impl DerivedSecretKey { } } - return Ok(Self { - secret_key, - chain_code, - }); + return Ok(Self { secret_key, chain_code }); } } /// Derives a child secret key. + /// + /// # Errors + /// + /// Will return `Err` if the derivation is invalid or if the [`SecretKey`] doesn't support + /// derivation pub fn derive(&self, child: ChildNumber) -> Result { match &self.secret_key { SecretKey::Sr25519(secret, _) => { @@ -267,7 +271,7 @@ impl DerivedSecretKey { secret_key: SecretKey::Sr25519(secret, minisecret), chain_code: chain_code.0, }) - } + }, _ => self.bip32_derive(child), } } @@ -282,20 +286,20 @@ pub struct DerivedPublicKey { impl DerivedPublicKey { /// Constructs a derived public key from a public key and a chain code. - pub fn new(public_key: PublicKey, chain_code: [u8; 32]) -> Self { - Self { - public_key, - chain_code, - } + #[must_use] + pub const fn new(public_key: PublicKey, chain_code: [u8; 32]) -> Self { + Self { public_key, chain_code } } /// The public key used to verify messages. - pub fn public_key(&self) -> &PublicKey { + #[must_use] + pub const fn public_key(&self) -> &PublicKey { &self.public_key } /// The chain code used to derive child keys. - pub fn chain_code(&self) -> &[u8; 32] { + #[must_use] + pub const fn chain_code(&self) -> &[u8; 32] { &self.chain_code } @@ -324,25 +328,27 @@ impl DerivedPublicKey { let public_key: [u8; 32] = public_key.try_into()?; let chain_code: [u8; 32] = chain_code.try_into()?; - let public_key = if let Some(public_key) = self.public_key.tweak_add(public_key)? { - public_key - } else if algorithm.uses_slip10_retry() { - retry = Some(chain_code); - continue; - } else if algorithm.uses_bip32_retry() { - return self.bip32_derive(child + 1); - } else { - anyhow::bail!("failed to derive a valid public key"); + let public_key = match self.public_key.tweak_add(public_key)? { + Some(public_key) => public_key, + _ if algorithm.uses_slip10_retry() => { + retry = Some(chain_code); + continue; + }, + _ if algorithm.uses_bip32_retry() => return self.bip32_derive(child + 1), + _ => { + anyhow::bail!("failed to derive a valid public key"); + }, }; - return Ok(Self { - public_key, - chain_code, - }); + return Ok(Self { public_key, chain_code }); } } /// Derives a child public key. 
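// Illustrative sketch, not part of this patch: the soft-derivation identity that the
// `tweak_add` helpers above rely on for secp256k1 — (sk + t)·G == pk + t·G — which is
// what lets `DerivedPublicKey::bip32_derive` produce non-hardened children without
// ever touching the secret key. Assumes the `k256` crate and an OS random source.
fn soft_derivation_identity_sketch() {
    use k256::{NonZeroScalar, ProjectivePoint};
    use rand_core::OsRng;

    let secret = NonZeroScalar::random(&mut OsRng);
    let tweak = NonZeroScalar::random(&mut OsRng);
    let public = ProjectivePoint::GENERATOR * secret.as_ref();

    let child_from_secret = ProjectivePoint::GENERATOR * (secret.as_ref() + tweak.as_ref());
    let child_from_public = public + ProjectivePoint::GENERATOR * tweak.as_ref();
    assert!(child_from_secret == child_from_public);
}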
+ /// + /// # Errors + /// + /// Can't derive a hardened public key pub fn derive(&self, child: ChildNumber) -> Result { anyhow::ensure!(child.is_normal(), "can't derive a hardened public key"); match &self.public_key { @@ -350,11 +356,8 @@ impl DerivedPublicKey { use schnorrkel::derive::Derivation; let chain_code = schnorrkel::derive::ChainCode(child.to_substrate_chain_code()); let (public, _) = public.derived_key_simple(chain_code, b""); - Ok(Self { - public_key: PublicKey::Sr25519(public), - chain_code: chain_code.0, - }) - } + Ok(Self { public_key: PublicKey::Sr25519(public), chain_code: chain_code.0 }) + }, _ => self.bip32_derive(child), } } @@ -395,11 +398,7 @@ mod tests { } fn assert(&self, chain_code: &str, private: &str, public: &str) { - assert_eq!( - hex::encode(self.secret.chain_code()), - chain_code, - "secret chain code" - ); + assert_eq!(hex::encode(self.secret.chain_code()), chain_code, "secret chain code"); assert_eq!( // in sr25519 soft derivations the nonce part of the secret key is computed // randomly so only the first 32 bytes are deterministic. all other keys are @@ -408,29 +407,17 @@ mod tests { private, "secret key" ); - assert_eq!( - hex::encode(self.public.chain_code()), - chain_code, - "secret chain code" - ); - assert_eq!( - hex::encode(self.public.public_key().to_bytes()), - public, - "public key" - ); + assert_eq!(hex::encode(self.public.chain_code()), chain_code, "secret chain code"); + assert_eq!(hex::encode(self.public.public_key().to_bytes()), public, "public key"); } fn assert_bip32(&self, xprv: &str, xpub: &str) { assert_eq!(&xprv[..4], "xprv"); assert_eq!(&xpub[..4], "xpub"); - let xprv = bs58::decode(xprv) - .with_alphabet(bs58::Alphabet::BITCOIN) - .into_vec() - .unwrap(); - let xpub = bs58::decode(xpub) - .with_alphabet(bs58::Alphabet::BITCOIN) - .into_vec() - .unwrap(); + let xprv = + bs58::decode(xprv).with_alphabet(bs58::Alphabet::BITCOIN).into_vec().unwrap(); + let xpub = + bs58::decode(xpub).with_alphabet(bs58::Alphabet::BITCOIN).into_vec().unwrap(); let chain_code1 = &xprv[13..45]; let chain_code2 = &xpub[13..45]; assert_eq!(chain_code1, chain_code2); @@ -444,10 +431,7 @@ mod tests { #[test] fn bip32_derive_secp256k1_1() -> Result<()> { // test vectors from SLIP0010 - for algorithm in [ - Algorithm::EcdsaSecp256k1, - Algorithm::EcdsaRecoverableSecp256k1, - ] { + for algorithm in [Algorithm::EcdsaSecp256k1, Algorithm::EcdsaRecoverableSecp256k1] { let key = DerivedKey::bip32_master_key(algorithm, "000102030405060708090a0b0c0d0e0f")?; key.assert( "873dff81c02f525623fd1fe5167eac3a55a049de3d314bb42ee227ffed37d508", @@ -478,7 +462,7 @@ mod tests { "0f479245fb19a38a1954c5c7c0ebab2f9bdfd96a17563ef28a6a4b1a2a764ef4", "02e8445082a72f29b75ca48748a914df60622a609cacfce8ed0e35804560741d29", ); - let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1000000000))?; + let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1_000_000_000))?; key.assert( "c783e67b921d2beb8f6b389cc646d7263b4145701dadd2161548a8b078e65e9e", "471b76e389e528d6de6d816857e012c5455051cad6660850e58372a6c3e6e7c8", @@ -524,7 +508,7 @@ mod tests { "5996c37fd3dd2679039b23ed6f70b506c6b56b3cb5e424681fb0fa64caf82aaa", "029f871f4cb9e1c97f9f4de9ccd0d4a2f2a171110c61178f84430062230833ff20", ); - let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1000000000))?; + let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1_000_000_000))?; key.assert( "b9b7b82d326bb9cb5b5b121066feea4eb93d5241103c9e7a18aad40f1dde8059", 
"21c4f269ef0a5fd1badf47eeacebeeaa3de22eb8e5b0adcd0f27dd99d34d0119", @@ -567,7 +551,7 @@ mod tests { "30d1dc7e5fc04c31219ab25a27ae00b50f6fd66622f6e9c913253d6511d1e662", "8abae2d66361c879b900d204ad2cc4984fa2aa344dd7ddc46007329ac76c429c", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(1000000000))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(1_000_000_000))?; key.assert( "68789923a0cac2cd5a29172a475fe9e0fb14cd6adb5ad98a3fa70333e7afa230", "8f94d394a8e8fd6b1bc2f3f49f5c47e385281d5c17e65324b0f62483e37e8793", @@ -612,7 +596,7 @@ mod tests { "5bdcd9d165e7e7f2c27c3e98edd10cc5f50a07dbd69b0e49171f531dae11890e", "da7b3bb8a92351f89c4a996561c8323fd534c9a6a6f713ffce316e0e95169c58", ); - let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1000000000))?; + let key = key.bip32_derive(ChildNumber::non_hardened_from_u32(1_000_000_000))?; key.assert( "00ca9a3b00000000000000000000000000000000000000000000000000000000", "8800b77abcbe366d2afa4c65e1c47884e5cd0ff81824d7cb10b8a3aa2a044a0f", @@ -626,10 +610,7 @@ mod tests { #[test] fn bip32_derive_secp256k1_2() -> Result<()> { // test vectors from SLIP0010 - for algorithm in [ - Algorithm::EcdsaSecp256k1, - Algorithm::EcdsaRecoverableSecp256k1, - ] { + for algorithm in [Algorithm::EcdsaSecp256k1, Algorithm::EcdsaRecoverableSecp256k1] { let key = DerivedKey::bip32_master_key(algorithm, SEED2)?; key.assert( "60499f801b896d83179a4374aeb7822aaeaceaa0db1f85ee3e904c4defbd9689", @@ -642,7 +623,7 @@ mod tests { "abe74a98f6c7eabee0428f53798f0ab8aa1bd37873999041703c742f15ac7e1e", "02fc9e5af0ac8d9b3cecfe2a888e2117ba3d089d8585886c9c826b6b22a98d12ea", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(2147483647))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_647))?; key.assert( "be17a268474a6bb9c61e1d720cf6215e2a88c5406c4aee7b38547f585c9a37d9", "877c779ad9687164e9c2f4f0f4ff0340814392330693ce95a58fe18fd52e6e93", @@ -654,7 +635,7 @@ mod tests { "704addf544a06e5ee4bea37098463c23613da32020d604506da8c0518e1da4b7", "03a7d1d856deb74c508e05031f9895dab54626251b3806e16b4bd12e781a7df5b9", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(2147483646))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_646))?; key.assert( "637807030d55d01f9a0cb3a7839515d796bd07706386a6eddf06cc29a65a0e29", "f1c7c871a54a804afe328b4c83a1c33b8e5ff48f5087273f04efa83b247d6a2d", @@ -685,7 +666,7 @@ mod tests { "d7d065f63a62624888500cdb4f88b6d59c2927fee9e6d0cdff9cad555884df6e", "039b6df4bece7b6c81e2adfeea4bcf5c8c8a6e40ea7ffa3cf6e8494c61a1fc82cc", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(2147483647))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_647))?; key.assert( "f235b2bc5c04606ca9c30027a84f353acf4e4683edbd11f635d0dcc1cd106ea6", "96d2ec9316746a75e7793684ed01e3d51194d81a42a3276858a5b7376d4b94b9", @@ -697,7 +678,7 @@ mod tests { "974f9096ea6873a915910e82b29d7c338542ccde39d2064d1cc228f371542bbc", "03abe0ad54c97c1d654c1852dfdc32d6d3e487e75fa16f0fd6304b9ceae4220c64", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(2147483646))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_646))?; key.assert( "5794e616eadaf33413aa309318a26ee0fd5163b70466de7a4512fd4b1a5c9e6a", "da29649bbfaff095cd43819eda9a7be74236539a29094cd8336b07ed8d4eff63", @@ -727,7 +708,7 @@ mod tests { "1559eb2bbec5790b0c65d8693e4d0875b1747f4970ae8b650486ed7470845635", "86fab68dcb57aa196c77c5f264f215a112c22a912c10d123b0d03c3c28ef1037", ); - let key = 
key.bip32_derive(ChildNumber::hardened_from_u32(2147483647))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_647))?; key.assert( "138f0b2551bcafeca6ff2aa88ba8ed0ed8de070841f0c4ef0165df8181eaad7f", "ea4f5bfe8694d8bb74b7b59404632fd5968b774ed545e810de9c32a4fb4192f4", @@ -739,7 +720,7 @@ mod tests { "3757c7577170179c7868353ada796c839135b3d30554bbb74a4b1e4a5a58505c", "2e66aa57069c86cc18249aecf5cb5a9cebbfd6fadeab056254763874a9352b45", ); - let key = key.bip32_derive(ChildNumber::hardened_from_u32(2147483646))?; + let key = key.bip32_derive(ChildNumber::hardened_from_u32(2_147_483_646))?; key.assert( "0902fe8a29f9140480a00ef244bd183e8a13288e4412d8389d140aac1794825a", "5837736c89570de861ebc173b1086da4f505d4adb387c6a1b1342d5e4ac9ec72", diff --git a/rosetta-crypto/src/bip44.rs b/rosetta-crypto/src/bip44.rs index aa27d2e8..8a75f3ea 100644 --- a/rosetta-crypto/src/bip44.rs +++ b/rosetta-crypto/src/bip44.rs @@ -10,39 +10,46 @@ pub struct ChildNumber(u32); impl ChildNumber { /// Is a hard derivation. - pub fn is_hardened(&self) -> bool { + #[must_use] + pub const fn is_hardened(&self) -> bool { self.0 & HARDENED_BIT == HARDENED_BIT } /// Is a normal derivation. - pub fn is_normal(&self) -> bool { + #[must_use] + pub const fn is_normal(&self) -> bool { self.0 & HARDENED_BIT == 0 } /// Creates a new hard derivation. - pub fn hardened_from_u32(index: u32) -> Self { - ChildNumber(index | HARDENED_BIT) + #[must_use] + pub const fn hardened_from_u32(index: u32) -> Self { + Self(index | HARDENED_BIT) } /// Creates a new soft derivation. - pub fn non_hardened_from_u32(index: u32) -> Self { - ChildNumber(index) + #[must_use] + pub const fn non_hardened_from_u32(index: u32) -> Self { + Self(index) } /// Returns the index. - pub fn index(&self) -> u32 { + #[must_use] + pub const fn index(&self) -> u32 { self.0 & (i32::MAX as u32) } /// Returns BIP32 byte sequence. - pub fn to_bytes(&self) -> [u8; 4] { + #[must_use] + pub const fn to_bytes(&self) -> [u8; 4] { self.0.to_be_bytes() } /// Returns the substrate compatible chain code. + #[must_use] pub fn to_substrate_chain_code(&self) -> [u8; 32] { let mut chain_code = [0; 32]; - let bytes = (self.index() as u64).to_le_bytes(); + let bytes = u64::from(self.index()).to_le_bytes(); chain_code[..bytes.len()].copy_from_slice(&bytes[..]); chain_code } @@ -59,12 +66,9 @@ impl core::ops::Add for ChildNumber { impl FromStr for ChildNumber { type Err = anyhow::Error; - fn from_str(child: &str) -> Result { - let (child, mask) = if let Some(child) = child.strip_suffix('\'') { - (child, HARDENED_BIT) - } else { - (child, 0) - }; + fn from_str(child: &str) -> Result { + let (child, mask) = + child.strip_suffix('\'').map_or((child, 0), |child| (child, HARDENED_BIT)); let index: u32 = child.parse()?; @@ -72,7 +76,7 @@ impl FromStr for ChildNumber { anyhow::bail!("invalid child number"); } - Ok(ChildNumber(index | mask)) + Ok(Self(index | mask)) } } @@ -85,16 +89,14 @@ pub struct DerivationPath { impl FromStr for DerivationPath { type Err = anyhow::Error; - fn from_str(path: &str) -> Result { + fn from_str(path: &str) -> Result { let mut path = path.split('/'); if path.next() != Some("m") { anyhow::bail!("invalid derivation path"); } - Ok(DerivationPath { - path: path.map(str::parse).collect::>>()?, - }) + Ok(Self { path: path.map(str::parse).collect::>>()? 
}) } } diff --git a/rosetta-crypto/src/error.rs b/rosetta-crypto/src/error.rs index db6d9a55..3b9dcba9 100644 --- a/rosetta-crypto/src/error.rs +++ b/rosetta-crypto/src/error.rs @@ -1,7 +1,7 @@ use thiserror::Error; /// Errors that can occur while converting or parsing addresses. -#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error, PartialEq, Eq)] pub enum AddressError { #[error("Invalid address format")] InvalidAddressFormat, diff --git a/rosetta-crypto/src/lib.rs b/rosetta-crypto/src/lib.rs index 1d31c513..7eb1563f 100644 --- a/rosetta-crypto/src/lib.rs +++ b/rosetta-crypto/src/lib.rs @@ -3,10 +3,11 @@ #![deny(warnings)] use anyhow::{Context, Result}; -use ecdsa::hazmat::SignPrimitive; -use ecdsa::signature::hazmat::PrehashSigner; -use ecdsa::signature::{Signer as _, Verifier as _}; -use ecdsa::RecoveryId; +use ecdsa::{ + hazmat::SignPrimitive, + signature::{hazmat::PrehashSigner, Signer as _, Verifier as _}, + RecoveryId, +}; use ed25519_dalek::{Signer as _, Verifier as _}; use sha2::Digest; @@ -33,8 +34,9 @@ pub enum Algorithm { impl Algorithm { /// Returns true if the signer's public key is recoverable from the signature. - pub fn is_recoverable(&self) -> bool { - matches!(self, Algorithm::EcdsaRecoverableSecp256k1) + #[must_use] + pub const fn is_recoverable(&self) -> bool { + matches!(self, Self::EcdsaRecoverableSecp256k1) } } @@ -54,118 +56,129 @@ pub enum SecretKey { impl Clone for SecretKey { fn clone(&self) -> Self { + #[allow(clippy::unwrap_used)] Self::from_bytes(self.algorithm(), &self.to_bytes()).unwrap() } } impl SecretKey { /// Returns the signing algorithm. - pub fn algorithm(&self) -> Algorithm { + #[must_use] + pub const fn algorithm(&self) -> Algorithm { match self { - SecretKey::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, - SecretKey::EcdsaRecoverableSecp256k1(_) => Algorithm::EcdsaRecoverableSecp256k1, - SecretKey::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, - SecretKey::Ed25519(_) => Algorithm::Ed25519, - SecretKey::Sr25519(_, _) => Algorithm::Sr25519, + Self::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, + Self::EcdsaRecoverableSecp256k1(_) => Algorithm::EcdsaRecoverableSecp256k1, + Self::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, + Self::Ed25519(_) => Algorithm::Ed25519, + Self::Sr25519(_, _) => Algorithm::Sr25519, } } /// Creates a secret key from a byte sequence for a given signing algorithm. + /// + /// # Errors + /// Will return `Err` if `bytes` has the wrong length pub fn from_bytes(algorithm: Algorithm, bytes: &[u8]) -> Result { Ok(match algorithm { Algorithm::EcdsaSecp256k1 => { - SecretKey::EcdsaSecp256k1(ecdsa::SigningKey::from_bytes(bytes.try_into()?)?) - } - Algorithm::EcdsaRecoverableSecp256k1 => SecretKey::EcdsaRecoverableSecp256k1( - ecdsa::SigningKey::from_bytes(bytes.try_into()?)?, - ), + Self::EcdsaSecp256k1(ecdsa::SigningKey::from_bytes(bytes.try_into()?)?) + }, + Algorithm::EcdsaRecoverableSecp256k1 => { + Self::EcdsaRecoverableSecp256k1(ecdsa::SigningKey::from_bytes(bytes.try_into()?)?) + }, Algorithm::EcdsaSecp256r1 => { - SecretKey::EcdsaSecp256r1(ecdsa::SigningKey::from_bytes(bytes.try_into()?)?) - } + Self::EcdsaSecp256r1(ecdsa::SigningKey::from_bytes(bytes.try_into()?)?) 
+ }, Algorithm::Ed25519 => { let secret = ed25519_dalek::SecretKey::from_bytes(bytes)?; let public = ed25519_dalek::PublicKey::from(&secret); let keypair = ed25519_dalek::Keypair { secret, public }; - SecretKey::Ed25519(keypair) - } + Self::Ed25519(keypair) + }, Algorithm::Sr25519 => { if bytes.len() == 32 { let minisecret = schnorrkel::MiniSecretKey::from_bytes(bytes) .map_err(|err| anyhow::anyhow!("{}", err))?; let secret = minisecret.expand_to_keypair(schnorrkel::MiniSecretKey::ED25519_MODE); - SecretKey::Sr25519(secret, Some(minisecret)) + Self::Sr25519(secret, Some(minisecret)) } else { let secret = schnorrkel::SecretKey::from_bytes(bytes) .map_err(|err| anyhow::anyhow!("{}", err))?; - SecretKey::Sr25519(secret.to_keypair(), None) + Self::Sr25519(secret.to_keypair(), None) } - } + }, }) } /// Returns a byte sequence representing the secret key. + #[must_use] pub fn to_bytes(&self) -> Vec { match self { - SecretKey::EcdsaSecp256k1(secret) => secret.to_bytes().to_vec(), - SecretKey::EcdsaRecoverableSecp256k1(secret) => secret.to_bytes().to_vec(), - SecretKey::EcdsaSecp256r1(secret) => secret.to_bytes().to_vec(), - SecretKey::Ed25519(secret) => secret.secret.to_bytes().to_vec(), - SecretKey::Sr25519(_, Some(minisecret)) => minisecret.as_bytes().to_vec(), - SecretKey::Sr25519(secret, None) => secret.secret.to_bytes().to_vec(), + Self::EcdsaRecoverableSecp256k1(secret) | Self::EcdsaSecp256k1(secret) => { + secret.to_bytes().to_vec() + }, + Self::EcdsaSecp256r1(secret) => secret.to_bytes().to_vec(), + Self::Ed25519(secret) => secret.secret.to_bytes().to_vec(), + Self::Sr25519(_, Some(minisecret)) => minisecret.as_bytes().to_vec(), + Self::Sr25519(secret, None) => secret.secret.to_bytes().to_vec(), } } /// Returns the public key used for verifying signatures. + #[must_use] pub fn public_key(&self) -> PublicKey { match self { - SecretKey::EcdsaSecp256k1(secret) => PublicKey::EcdsaSecp256k1(*secret.verifying_key()), - SecretKey::EcdsaRecoverableSecp256k1(secret) => { + Self::EcdsaSecp256k1(secret) => PublicKey::EcdsaSecp256k1(*secret.verifying_key()), + Self::EcdsaRecoverableSecp256k1(secret) => { PublicKey::EcdsaRecoverableSecp256k1(*secret.verifying_key()) - } - SecretKey::EcdsaSecp256r1(secret) => PublicKey::EcdsaSecp256r1(*secret.verifying_key()), - SecretKey::Ed25519(secret) => PublicKey::Ed25519(secret.public), - SecretKey::Sr25519(secret, _) => PublicKey::Sr25519(secret.public), + }, + Self::EcdsaSecp256r1(secret) => PublicKey::EcdsaSecp256r1(*secret.verifying_key()), + Self::Ed25519(secret) => PublicKey::Ed25519(secret.public), + Self::Sr25519(secret, _) => PublicKey::Sr25519(secret.public), } } /// Signs a message and returns it's signature. 
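// Illustrative sketch, not part of this patch: a byte round-trip through the
// `SecretKey` constructors documented above. The 32-byte value is an arbitrary
// non-zero scalar chosen only for the example.
fn secret_key_roundtrip_sketch() -> Result<()> {
    let bytes = [0x42u8; 32];
    let secret = SecretKey::from_bytes(Algorithm::EcdsaSecp256k1, &bytes)?;
    assert_eq!(secret.to_bytes(), bytes.to_vec());
    assert!(matches!(secret.public_key().algorithm(), Algorithm::EcdsaSecp256k1));
    Ok(())
}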
+ #[must_use] + #[allow(clippy::missing_panics_doc)] pub fn sign(&self, msg: &[u8], context_param: &str) -> Signature { match self { - SecretKey::EcdsaSecp256k1(secret) => Signature::EcdsaSecp256k1(secret.sign(msg)), - SecretKey::EcdsaRecoverableSecp256k1(_) => { + Self::EcdsaSecp256k1(secret) => Signature::EcdsaSecp256k1(secret.sign(msg)), + Self::EcdsaRecoverableSecp256k1(_) => { let digest = sha2::Sha256::digest(msg); - self.sign_prehashed(&digest).unwrap() - } - SecretKey::EcdsaSecp256r1(secret) => Signature::EcdsaSecp256r1(secret.sign(msg)), - SecretKey::Ed25519(secret) => Signature::Ed25519(secret.sign(msg)), - SecretKey::Sr25519(secret, _) => { + #[allow(clippy::expect_used)] + self.sign_prehashed(&digest).expect("supports prehash; qed") + }, + Self::EcdsaSecp256r1(secret) => Signature::EcdsaSecp256r1(secret.sign(msg)), + Self::Ed25519(secret) => Signature::Ed25519(secret.sign(msg)), + Self::Sr25519(secret, _) => { // need a signing context here for substrate let context = schnorrkel::signing_context(context_param.as_bytes()); Signature::Sr25519(secret.sign(context.bytes(msg))) - } + }, } } /// Signs a prehashed message and returns it's signature. + /// + /// # Errors + /// + /// Not supported by [`SecretKey::Ed25519`] and [`SecretKey::Sr25519`] pub fn sign_prehashed(&self, hash: &[u8]) -> Result { Ok(match self { - SecretKey::EcdsaSecp256k1(secret) => { - Signature::EcdsaSecp256k1(secret.sign_prehash(hash)?) - } - SecretKey::EcdsaRecoverableSecp256k1(secret) => { + Self::EcdsaSecp256k1(secret) => Signature::EcdsaSecp256k1(secret.sign_prehash(hash)?), + Self::EcdsaRecoverableSecp256k1(secret) => { let (sig, recid) = secret .as_nonzero_scalar() .try_sign_prehashed_rfc6979::(hash.try_into()?, b"")?; Signature::EcdsaRecoverableSecp256k1(sig, recid.context("no recovery id")?) - } - SecretKey::EcdsaSecp256r1(secret) => { - Signature::EcdsaSecp256r1(secret.sign_prehash(hash)?) - } - SecretKey::Ed25519(_) => anyhow::bail!("unimplemented"), - SecretKey::Sr25519(_, _) => { + }, + Self::EcdsaSecp256r1(secret) => Signature::EcdsaSecp256r1(secret.sign_prehash(hash)?), + Self::Ed25519(_) => anyhow::bail!("unimplemented"), + Self::Sr25519(_, _) => { anyhow::bail!("unsupported") - } + }, }) } } @@ -187,82 +200,93 @@ pub enum PublicKey { impl PublicKey { /// Returns the signing algorithm. - pub fn algorithm(&self) -> Algorithm { + #[must_use] + pub const fn algorithm(&self) -> Algorithm { match self { - PublicKey::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, - PublicKey::EcdsaRecoverableSecp256k1(_) => Algorithm::EcdsaRecoverableSecp256k1, - PublicKey::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, - PublicKey::Ed25519(_) => Algorithm::Ed25519, - PublicKey::Sr25519(_) => Algorithm::Sr25519, + Self::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, + Self::EcdsaRecoverableSecp256k1(_) => Algorithm::EcdsaRecoverableSecp256k1, + Self::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, + Self::Ed25519(_) => Algorithm::Ed25519, + Self::Sr25519(_) => Algorithm::Sr25519, } } /// Creates a public key from a byte sequence for a given signing algorithm. + /// + /// # Errors + /// + /// Will return `Err` if `bytes` is not a valid public key for `algoritm`. pub fn from_bytes(algorithm: Algorithm, bytes: &[u8]) -> Result { Ok(match algorithm { Algorithm::EcdsaSecp256k1 => { - PublicKey::EcdsaSecp256k1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) - } + Self::EcdsaSecp256k1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) 
+ }, Algorithm::EcdsaRecoverableSecp256k1 => { - PublicKey::EcdsaRecoverableSecp256k1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) - } + Self::EcdsaRecoverableSecp256k1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) + }, Algorithm::EcdsaSecp256r1 => { - PublicKey::EcdsaSecp256r1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) - } - Algorithm::Ed25519 => PublicKey::Ed25519(ed25519_dalek::PublicKey::from_bytes(bytes)?), + Self::EcdsaSecp256r1(ecdsa::VerifyingKey::from_sec1_bytes(bytes)?) + }, + Algorithm::Ed25519 => Self::Ed25519(ed25519_dalek::PublicKey::from_bytes(bytes)?), Algorithm::Sr25519 => { let public = schnorrkel::PublicKey::from_bytes(bytes) .map_err(|err| anyhow::anyhow!("{}", err))?; - PublicKey::Sr25519(public) - } + Self::Sr25519(public) + }, }) } /// Returns a byte sequence representing the public key. + #[must_use] pub fn to_bytes(&self) -> Vec { match self { - PublicKey::EcdsaSecp256k1(public) => public.to_encoded_point(true).as_bytes().to_vec(), - PublicKey::EcdsaRecoverableSecp256k1(public) => { + Self::EcdsaSecp256k1(public) => public.to_encoded_point(true).as_bytes().to_vec(), + Self::EcdsaRecoverableSecp256k1(public) => { public.to_encoded_point(true).as_bytes().to_vec() - } - PublicKey::EcdsaSecp256r1(public) => public.to_encoded_point(true).as_bytes().to_vec(), - PublicKey::Ed25519(public) => public.to_bytes().to_vec(), - PublicKey::Sr25519(public) => public.to_bytes().to_vec(), + }, + Self::EcdsaSecp256r1(public) => public.to_encoded_point(true).as_bytes().to_vec(), + Self::Ed25519(public) => public.to_bytes().to_vec(), + Self::Sr25519(public) => public.to_bytes().to_vec(), } } /// Returns an uncompressed byte sequence representing the public key. + #[must_use] pub fn to_uncompressed_bytes(&self) -> Vec { match self { - PublicKey::EcdsaSecp256k1(public) => public.to_encoded_point(false).as_bytes().to_vec(), - PublicKey::EcdsaRecoverableSecp256k1(public) => { + Self::EcdsaSecp256k1(public) => public.to_encoded_point(false).as_bytes().to_vec(), + Self::EcdsaRecoverableSecp256k1(public) => { public.to_encoded_point(false).as_bytes().to_vec() - } - PublicKey::EcdsaSecp256r1(public) => public.to_encoded_point(false).as_bytes().to_vec(), - PublicKey::Ed25519(public) => public.to_bytes().to_vec(), - PublicKey::Sr25519(public) => public.to_bytes().to_vec(), + }, + Self::EcdsaSecp256r1(public) => public.to_encoded_point(false).as_bytes().to_vec(), + Self::Ed25519(public) => public.to_bytes().to_vec(), + Self::Sr25519(public) => public.to_bytes().to_vec(), } } /// Verifies a signature. + /// + /// # Errors + /// + /// Will return `Err` when: + /// - Signature is invalid + /// - The `sig` type doesn't match `self` type. pub fn verify(&self, msg: &[u8], sig: &Signature) -> Result<()> { match (self, &sig) { - (PublicKey::EcdsaSecp256k1(public), Signature::EcdsaSecp256k1(sig)) => { - public.verify(msg, sig)? - } + (Self::EcdsaSecp256k1(public), Signature::EcdsaSecp256k1(sig)) => { + public.verify(msg, sig)?; + }, ( - PublicKey::EcdsaRecoverableSecp256k1(public), + Self::EcdsaRecoverableSecp256k1(public), Signature::EcdsaRecoverableSecp256k1(sig, _), ) => public.verify(msg, sig)?, - (PublicKey::EcdsaSecp256r1(public), Signature::EcdsaSecp256r1(sig)) => { - public.verify(msg, sig)? 
- } - (PublicKey::Ed25519(public), Signature::Ed25519(sig)) => public.verify(msg, sig)?, - (PublicKey::Sr25519(public), Signature::Sr25519(sig)) => { - public - .verify_simple(&[], msg, sig) - .map_err(|err| anyhow::anyhow!("{}", err))?; - } + (Self::EcdsaSecp256r1(public), Signature::EcdsaSecp256r1(sig)) => { + public.verify(msg, sig)?; + }, + (Self::Ed25519(public), Signature::Ed25519(sig)) => public.verify(msg, sig)?, + (Self::Sr25519(public), Signature::Sr25519(sig)) => { + public.verify_simple(&[], msg, sig).map_err(|err| anyhow::anyhow!("{}", err))?; + }, (_, _) => anyhow::bail!("unsupported signature scheme"), }; Ok(()) @@ -286,57 +310,63 @@ pub enum Signature { impl Signature { /// Returns the signing algorithm. - pub fn algorithm(&self) -> Algorithm { + #[must_use] + pub const fn algorithm(&self) -> Algorithm { match self { - Signature::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, - Signature::EcdsaRecoverableSecp256k1(_, _) => Algorithm::EcdsaRecoverableSecp256k1, - Signature::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, - Signature::Ed25519(_) => Algorithm::Ed25519, - Signature::Sr25519(_) => Algorithm::Sr25519, + Self::EcdsaSecp256k1(_) => Algorithm::EcdsaSecp256k1, + Self::EcdsaRecoverableSecp256k1(_, _) => Algorithm::EcdsaRecoverableSecp256k1, + Self::EcdsaSecp256r1(_) => Algorithm::EcdsaSecp256r1, + Self::Ed25519(_) => Algorithm::Ed25519, + Self::Sr25519(_) => Algorithm::Sr25519, } } /// Creates a signature from a byte sequence for a given signing algorithm. + /// + /// # Errors + /// + /// Will return `Err` if `bytes` doesn't represent a valid signature for `algorithm` pub fn from_bytes(algorithm: Algorithm, bytes: &[u8]) -> Result { Ok(match algorithm { - Algorithm::EcdsaSecp256k1 => { - Signature::EcdsaSecp256k1(ecdsa::Signature::try_from(bytes)?) - } - Algorithm::EcdsaRecoverableSecp256k1 => Signature::EcdsaRecoverableSecp256k1( + Algorithm::EcdsaSecp256k1 => Self::EcdsaSecp256k1(ecdsa::Signature::try_from(bytes)?), + Algorithm::EcdsaRecoverableSecp256k1 => Self::EcdsaRecoverableSecp256k1( ecdsa::Signature::try_from(&bytes[..64])?, RecoveryId::from_byte(bytes[64]).context("invalid signature")?, ), - Algorithm::EcdsaSecp256r1 => { - Signature::EcdsaSecp256r1(ecdsa::Signature::try_from(bytes)?) - } - Algorithm::Ed25519 => Signature::Ed25519(ed25519_dalek::Signature::from_bytes(bytes)?), + Algorithm::EcdsaSecp256r1 => Self::EcdsaSecp256r1(ecdsa::Signature::try_from(bytes)?), + Algorithm::Ed25519 => Self::Ed25519(ed25519_dalek::Signature::from_bytes(bytes)?), Algorithm::Sr25519 => { let sig = schnorrkel::Signature::from_bytes(bytes) .map_err(|err| anyhow::anyhow!("{}", err))?; - Signature::Sr25519(sig) - } + Self::Sr25519(sig) + }, }) } /// Returns a byte sequence representing the signature. + #[must_use] pub fn to_bytes(&self) -> Vec { match self { - Signature::EcdsaSecp256k1(sig) => sig.to_vec(), - Signature::EcdsaRecoverableSecp256k1(sig, recovery_id) => { + Self::EcdsaSecp256k1(sig) => sig.to_vec(), + Self::EcdsaRecoverableSecp256k1(sig, recovery_id) => { let mut bytes = Vec::with_capacity(65); bytes.extend(sig.to_bytes()); bytes.push(recovery_id.to_byte()); bytes - } - Signature::EcdsaSecp256r1(sig) => sig.to_vec(), - Signature::Ed25519(sig) => sig.to_bytes().to_vec(), - Signature::Sr25519(sig) => sig.to_bytes().to_vec(), + }, + Self::EcdsaSecp256r1(sig) => sig.to_vec(), + Self::Ed25519(sig) => sig.to_bytes().to_vec(), + Self::Sr25519(sig) => sig.to_bytes().to_vec(), } } /// Returns the recovered public key if supported. 
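// Illustrative sketch, not part of this patch: the recoverable-signature flow the
// hunks above and below describe. A recoverable secp256k1 signature serializes to
// 65 bytes (r || s || recovery id), which is enough for `recover` to reconstruct the
// signer's public key from the message alone.
fn recoverable_signature_sketch() -> Result<()> {
    let secret = SecretKey::from_bytes(Algorithm::EcdsaRecoverableSecp256k1, &[0x42u8; 32])?;
    let msg = b"rosetta example message";

    let sig = secret.sign(msg, "");
    assert_eq!(sig.to_bytes().len(), 65);

    let recovered = sig.recover(msg)?.ok_or_else(|| anyhow::anyhow!("scheme is recoverable"))?;
    assert_eq!(recovered.to_bytes(), secret.public_key().to_bytes());
    Ok(())
}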
+ /// + /// # Errors + /// + /// Will return `Err` if `msg` is the public key cannot be recovered pub fn recover(&self, msg: &[u8]) -> Result> { - if let Signature::EcdsaRecoverableSecp256k1(signature, recovery_id) = self { + if let Self::EcdsaRecoverableSecp256k1(signature, recovery_id) = self { let recovered_key = ecdsa::VerifyingKey::recover_from_msg(msg, signature, *recovery_id)?; Ok(Some(PublicKey::EcdsaRecoverableSecp256k1(recovered_key))) @@ -346,8 +376,12 @@ impl Signature { } /// Returns the recovered public key if supported. + /// + /// # Errors + /// + /// Will return `Err` if the public key cannot be recovered pub fn recover_prehashed(&self, hash: &[u8]) -> Result> { - if let Signature::EcdsaRecoverableSecp256k1(signature, recovery_id) = self { + if let Self::EcdsaRecoverableSecp256k1(signature, recovery_id) = self { let recovered_key = ecdsa::VerifyingKey::recover_from_prehash(hash, signature, *recovery_id)?; Ok(Some(PublicKey::EcdsaRecoverableSecp256k1(recovered_key))) diff --git a/rosetta-docker/src/config.rs b/rosetta-docker/src/config.rs index 84d56146..e23ceed8 100644 --- a/rosetta-docker/src/config.rs +++ b/rosetta-docker/src/config.rs @@ -10,7 +10,7 @@ const DEFAULT_DOCKER_ENDPOINT: &str = "unix:///var/run/docker.sock"; /// For windows the default endpoint is "npipe:////./pipe/docker_engine" /// But currently this is not supported by docker-api, using to default tcp endpoint instead -/// https://github.com/vv9k/docker-api-rs/issues/57 +/// (https://github.com/vv9k/docker-api-rs/issues/57) #[cfg(not(unix))] const DEFAULT_DOCKER_ENDPOINT: &str = "tcp://127.0.0.1:2375"; @@ -21,7 +21,8 @@ pub mod env_vars { /// The location of your client configuration files. pub const DOCKER_CONFIG: &str = "DOCKER_CONFIG"; - /// Name of the `docker context` to use (overrides `DOCKER_HOST` env var and default context set with `docker context use`) + /// Name of the `docker context` to use (overrides `DOCKER_HOST` env var and default context set + /// with `docker context use`) pub const DOCKER_CONTEXT: &str = "DOCKER_CONTEXT"; /// Daemon socket to connect to. @@ -73,7 +74,7 @@ pub fn docker_endpoint() -> String { /// of the configuration files via the `DOCKER_CONFIG` environment variable /// /// Reference: -/// https://github.com/docker/cli/blob/v24.0.5/man/docker-config-json.5.md +/// pub fn docker_config_dir() -> anyhow::Result { // Verifies if the config directory exists let directory_exists = |directory: PathBuf| { @@ -103,8 +104,8 @@ pub fn docker_config_dir() -> anyhow::Result { /// of the configuration files via the `DOCKER_CONFIG` environment variable /// /// Reference: -/// https://github.com/docker/cli/blob/v24.0.5/man/docker-config-json.5.md -/// https://github.com/docker/cli/blob/v24.0.5/cli/config/configfile/file.go#L17-L44 +/// - +/// - pub fn endpoint_from_config(config_dir: PathBuf) -> anyhow::Result { // Extract the current context from config.json file let config_file = config_dir.join("config.json"); @@ -118,9 +119,8 @@ pub fn endpoint_from_config(config_dir: PathBuf) -> anyhow::Result { .parse::() .context("config.json is not a valid json")? 
.get("currentContext") - .and_then(|value| value.as_str()) - .map(str::to_string) - .unwrap_or_else(|| "default".to_string()); + .and_then(serde_json::Value::as_str) + .map_or_else(|| "default".to_string(), str::to_string); // Find the endpoint find_context_endpoint(config_dir, ¤t_context) @@ -172,7 +172,7 @@ fn sha256_digest(name: &str) -> String { #[cfg(test)] mod tests { use super::*; - use docker_api::Docker; + use docker_api::{opts::ContainerListOpts, Docker}; #[test] fn test_sha256_digest() { @@ -187,7 +187,7 @@ mod tests { // Obs: docker must be running let host = docker_endpoint(); let docker = Docker::new(&host).unwrap(); - let result = docker.containers().list(&Default::default()).await; + let result = docker.containers().list(&ContainerListOpts::default()).await; assert!(result.is_ok()); } } diff --git a/rosetta-docker/src/lib.rs b/rosetta-docker/src/lib.rs index 282c153a..32005b1b 100644 --- a/rosetta-docker/src/lib.rs +++ b/rosetta-docker/src/lib.rs @@ -1,17 +1,17 @@ mod config; -use anyhow::Result; -use docker_api::conn::TtyChunk; -use docker_api::opts::{ - ContainerCreateOpts, ContainerListOpts, ContainerStopOpts, HostPort, LogsOpts, PublishPort, +use anyhow::{Context, Result}; +use docker_api::{ + conn::TtyChunk, + opts::{ + ContainerCreateOpts, ContainerListOpts, ContainerStopOpts, HostPort, LogsOpts, PublishPort, + }, + ApiVersion, Container, Docker, }; -use docker_api::{ApiVersion, Container, Docker}; use futures::stream::StreamExt; use rosetta_client::Wallet; use rosetta_core::{BlockchainClient, BlockchainConfig}; -use std::future::Future; -use std::sync::Arc; -use std::time::Duration; +use std::{future::Future, sync::Arc, time::Duration}; use tokio_retry::{strategy::ExponentialBackoff, RetryIf}; pub struct Env { @@ -20,51 +20,55 @@ pub struct Env { } impl Env { + #[allow(clippy::missing_errors_doc)] pub async fn new( prefix: &str, mut config: BlockchainConfig, start_connector: F, - ) -> Result> + ) -> Result where Fut: Future> + Send, - F: FnMut(BlockchainConfig) -> Fut, + F: FnMut(BlockchainConfig) -> Fut + Send, { env_logger::try_init().ok(); let builder = EnvBuilder::new(prefix)?; - let node_port = builder.random_port(); + let node_port = random_port(); config.node_uri.port = node_port; log::info!("node: {}", node_port); builder.stop_container(&builder.node_name(&config)).await?; let node = builder.run_node(&config).await?; - let client = match builder - .run_connector::(start_connector, config) - .await - { + let client = match builder.run_connector::(start_connector, config).await { Ok(connector) => connector, Err(e) => { let opts = ContainerStopOpts::builder().build(); let _ = node.stop(&opts).await; return Err(e); - } + }, }; - Ok(Self { - client: Arc::new(client), - node, - }) + Ok(Self { client: Arc::new(client), node }) } + #[must_use] pub fn node(&self) -> Arc { Arc::clone(&self.client) } + /// Creates a new ephemeral wallet + /// + /// # Errors + /// Returns `Err` if the node uri is invalid or keyfile doesn't exists pub async fn ephemeral_wallet(&self) -> Result { let config = self.client.config().clone(); let node_uri = config.node_uri.to_string(); Wallet::from_config(config, &node_uri, None).await } + /// Stop all containers + /// + /// # Errors + /// Will return `Err` if it fails to stop the container for some reason pub async fn shutdown(self) -> Result<()> { let opts = ContainerStopOpts::builder().build(); self.node.stop(&opts).await?; @@ -85,17 +89,8 @@ impl<'a> EnvBuilder<'a> { Ok(Self { prefix, docker }) } - fn random_port(&self) -> u16 { - let mut 
bytes = [0; 2]; - getrandom::getrandom(&mut bytes).unwrap(); - u16::from_le_bytes(bytes) - } - fn node_name(&self, config: &BlockchainConfig) -> String { - format!( - "{}-node-{}-{}", - self.prefix, config.blockchain, config.network - ) + format!("{}-node-{}-{}", self.prefix, config.blockchain, config.network) } async fn stop_container(&self, name: &str) -> Result<()> { @@ -104,15 +99,16 @@ impl<'a> EnvBuilder<'a> { if container .names .as_ref() - .unwrap() + .context("no containers found")? .iter() .any(|n| n.as_str().ends_with(name)) { - let container = Container::new(self.docker.clone(), container.id.unwrap()); + let container = Container::new( + self.docker.clone(), + container.id.context("container doesn't have id")?, + ); log::info!("stopping {}", name); - container - .stop(&ContainerStopOpts::builder().build()) - .await?; + container.stop(&ContainerStopOpts::builder().build()).await?; container.delete().await.ok(); break; } @@ -129,26 +125,21 @@ impl<'a> EnvBuilder<'a> { log::info!("starting {}", name); let container = Container::new(self.docker.clone(), id.clone()); tokio::task::spawn(async move { - let opts = LogsOpts::builder() - .all() - .follow(true) - .stdout(true) - .stderr(true) - .build(); + let opts = LogsOpts::builder().all().follow(true).stdout(true).stderr(true).build(); let mut logs = container.logs(&opts); while let Some(chunk) = logs.next().await { match chunk { Ok(TtyChunk::StdOut(stdout)) => { let stdout = std::str::from_utf8(&stdout).unwrap_or_default(); log::info!("{}: stdout: {}", name, stdout); - } + }, Ok(TtyChunk::StdErr(stderr)) => { let stderr = std::str::from_utf8(&stderr).unwrap_or_default(); log::info!("{}: stderr: {}", name, stderr); - } + }, Err(err) => { log::error!("{}", err); - } + }, Ok(TtyChunk::StdIn(_)) => unreachable!(), } } @@ -161,7 +152,7 @@ impl<'a> EnvBuilder<'a> { Some(Health::Unhealthy) => anyhow::bail!("healthcheck reports unhealthy"), Some(Health::Starting) => { tokio::time::sleep(Duration::from_millis(100)).await; - } + }, _ => break, } } @@ -178,13 +169,13 @@ impl<'a> EnvBuilder<'a> { .auto_remove(true) .attach_stdout(true) .attach_stderr(true) - .publish(PublishPort::tcp(config.node_uri.port as _)) + .publish(PublishPort::tcp(u32::from(config.node_uri.port))) .expose( - PublishPort::tcp(config.node_uri.port as _), - HostPort::new(config.node_uri.port as u32), + PublishPort::tcp(u32::from(config.node_uri.port)), + HostPort::new(u32::from(config.node_uri.port)), ); for port in config.node_additional_ports { - let port = *port as u32; + let port = u32::from(*port); opts = opts.expose(PublishPort::tcp(port), port); } let container = self.run_container(name, &opts.build()).await?; @@ -223,7 +214,7 @@ impl<'a> EnvBuilder<'a> { where T: BlockchainClient, Fut: Future> + Send, - F: FnMut(BlockchainConfig) -> Fut, + F: FnMut(BlockchainConfig) -> Fut + Send, { const MAX_RETRIES: usize = 10; @@ -241,11 +232,11 @@ impl<'a> EnvBuilder<'a> { } result = Ok(client); break; - } + }, Err(error) => { result = Err(error); tokio::time::sleep(delay).await; - } + }, } } result? 
@@ -255,6 +246,13 @@ impl<'a> EnvBuilder<'a> { } } +fn random_port() -> u16 { + let mut bytes = [0; 2]; + #[allow(clippy::unwrap_used)] + getrandom::getrandom(&mut bytes).unwrap(); + u16::from_le_bytes(bytes) +} + #[derive(Clone, Copy, Debug, Eq, PartialEq)] enum Health { None, @@ -265,13 +263,8 @@ enum Health { async fn health(container: &Container) -> Result> { let inspect = container.inspect().await?; - let status = inspect - .state - .and_then(|state| state.health) - .and_then(|health| health.status); - let Some(status) = status else { - return Ok(None); - }; + let status = inspect.state.and_then(|state| state.health).and_then(|health| health.status); + let Some(status) = status else { return Ok(None) }; Ok(Some(match status.as_str() { "none" => Health::None, "starting" => Health::Starting, @@ -281,19 +274,19 @@ async fn health(container: &Container) -> Result> { })) } -async fn wait_for_http>(url: S, container: &Container) -> Result<()> { +#[derive(Debug)] +enum RetryError { + Retry(anyhow::Error), + ContainerExited(anyhow::Error), +} + +async fn wait_for_http + Send>(url: S, container: &Container) -> Result<()> { let url = url.as_ref(); let retry_strategy = ExponentialBackoff::from_millis(2) .factor(100) .max_delay(Duration::from_secs(2)) .take(20); // limit to 20 retries - #[derive(Debug)] - enum RetryError { - Retry(anyhow::Error), - ContainerExited(anyhow::Error), - } - RetryIf::spawn( retry_strategy, || async move { @@ -306,7 +299,7 @@ async fn wait_for_http>(url: S, container: &Container) -> Result<( return Err(RetryError::ContainerExited(err.into_inner())); } Err(RetryError::Retry(err.into_inner())) - } + }, } }, // Retry Condition @@ -314,8 +307,7 @@ async fn wait_for_http>(url: S, container: &Container) -> Result<( ) .await .map_err(|err| match err { - RetryError::Retry(error) => error, - RetryError::ContainerExited(error) => error, + RetryError::Retry(error) | RetryError::ContainerExited(error) => error, }) } @@ -334,6 +326,7 @@ pub mod tests { ) } + #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)] pub async fn network_status( start_connector: F, config: BlockchainConfig, @@ -341,25 +334,18 @@ pub mod tests { where T: BlockchainClient, Fut: Future> + Send, - F: FnMut(BlockchainConfig) -> Fut, + F: FnMut(BlockchainConfig) -> Fut + Send, { let env_id = env_id(); - let env = Env::new( - &format!("{env_id}-network-status"), - config.clone(), - start_connector, - ) - .await?; + let env = + Env::new(&format!("{env_id}-network-status"), config.clone(), start_connector).await?; let client = env.node(); // Check if the genesis is consistent let expected_genesis = client.genesis_block().clone(); let actual_genesis = client - .block(&PartialBlockIdentifier { - index: Some(0), - hash: None, - }) + .block(&PartialBlockIdentifier { index: Some(0), hash: None }) .await? 
.block_identifier; assert_eq!(expected_genesis, actual_genesis); @@ -390,19 +376,15 @@ pub mod tests { Ok(()) } + #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)] pub async fn account(start_connector: F, config: BlockchainConfig) -> Result<()> where T: BlockchainClient, Fut: Future> + Send, - F: FnMut(BlockchainConfig) -> Fut, + F: FnMut(BlockchainConfig) -> Fut + Send, { let env_id = env_id(); - let env = Env::new( - &format!("{env_id}-account"), - config.clone(), - start_connector, - ) - .await?; + let env = Env::new(&format!("{env_id}-account"), config.clone(), start_connector).await?; let value = 100 * u128::pow(10, config.currency_decimals); let wallet = env.ephemeral_wallet().await?; @@ -416,19 +398,16 @@ pub mod tests { Ok(()) } + #[allow(clippy::missing_panics_doc, clippy::missing_errors_doc)] pub async fn construction(start_connector: F, config: BlockchainConfig) -> Result<()> where T: BlockchainClient, Fut: Future> + Send, - F: FnMut(BlockchainConfig) -> Fut, + F: FnMut(BlockchainConfig) -> Fut + Send, { let env_id = env_id(); - let env = Env::new( - &format!("{env_id}-construction"), - config.clone(), - start_connector, - ) - .await?; + let env = + Env::new(&format!("{env_id}-construction"), config.clone(), start_connector).await?; let faucet = 100 * u128::pow(10, config.currency_decimals); let value = u128::pow(10, config.currency_decimals); diff --git a/rosetta-server/src/ws.rs b/rosetta-server/src/ws.rs index ea97d542..3565e115 100644 --- a/rosetta-server/src/ws.rs +++ b/rosetta-server/src/ws.rs @@ -7,15 +7,19 @@ mod reconnect_impl; mod retry_strategy; mod tungstenite_jsonrpsee; -use crate::ws::reconnect::{AutoReconnectClient, Reconnect}; -use crate::ws::reconnect_impl::{Config as ReconnectConfig, DefaultStrategy}; -use crate::ws::retry_strategy::RetryStrategy; +use crate::ws::{ + reconnect::{AutoReconnectClient, Reconnect}, + reconnect_impl::{Config as ReconnectConfig, DefaultStrategy}, + retry_strategy::RetryStrategy, +}; pub use config::{RpcClientConfig, WsTransportClient}; use futures_util::{future::BoxFuture, FutureExt}; -use jsonrpsee::core::Error as JsonRpseeError; use jsonrpsee::{ client_transport::ws::WsTransportClientBuilder, - core::client::{Client, ClientBuilder}, + core::{ + client::{Client, ClientBuilder}, + Error as JsonRpseeError, + }, }; use std::time::Duration; use tide::http::url::Url; @@ -34,19 +38,19 @@ async fn connect_client(url: Url, config: RpcClientConfig) -> Result { let client = build_socketto_client(builder, url.clone(), &config).await?; tracing::info!("Connected to {} using Socketto", url); client - } + }, WsTransportClient::Tungstenite => { let client = build_tungstenite_client(builder, url.clone(), &config).await?; tracing::info!("Connected to {} using Tungstenite", url); client - } + }, }; Ok(client) } @@ -86,6 +90,11 @@ impl ReconnectConfig for DefaultReconnectConfig { } } +/// Creates an Json-RPC client with default settings +/// +/// # Errors +/// +/// Returns `Err` if it fails to connect to the provided `url` pub async fn default_client( url: &str, config: Option, @@ -96,9 +105,7 @@ pub async fn default_client( .map_err(|e| JsonRpseeError::Transport(anyhow::Error::from(e)))?; let reconnect_config = DefaultReconnectConfig { url, config }; - DefaultStrategy::connect(reconnect_config) - .await - .map(|strategy| strategy.into_client()) + DefaultStrategy::connect(reconnect_config).await.map(Reconnect::into_client) } /// Creates a default jsonrpsee client using socketto. 
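The `default_client` helper added above wraps the connection in an auto-reconnecting jsonrpsee client. A hedged usage sketch; the module path, endpoint URL and RPC method are assumptions and not taken from this change:

use jsonrpsee::{core::client::ClientT, rpc_params};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // `None` keeps the default `RpcClientConfig`.
    let client = rosetta_server::ws::default_client("ws://127.0.0.1:9944", None).await?;
    // Any jsonrpsee `ClientT` method works; requests are retried after a reconnect.
    let chain: String = client.request("system_chain", rpc_params![]).await?;
    println!("connected to chain: {chain}");
    Ok(())
}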
diff --git a/rosetta-server/src/ws/config.rs b/rosetta-server/src/ws/config.rs index 591ec28f..5a52b4b5 100644 --- a/rosetta-server/src/ws/config.rs +++ b/rosetta-server/src/ws/config.rs @@ -7,7 +7,7 @@ use jsonrpsee::{ /// Ten megabytes. pub const TEN_MB_SIZE_BYTES: usize = 10 * 1024 * 1024; -/// Supported WebSocket transport clients. +/// Supported websocket transport clients. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] pub enum WsTransportClient { /// Auto will try to use Socketto first, if it fails, it will fallback to Tungstenite. @@ -34,7 +34,8 @@ pub enum RetryStrategyConfig { /// The power corresponds to the number of past attempts. ExponentialBackoff { /// base duration in milliseconds. - /// The resulting duration is calculated by taking the base to the n-th power, where n denotes the number of past attempts. + /// The resulting duration is calculated by taking the base to the n-th power, where n + /// denotes the number of past attempts. base: u64, /// A multiplicative factor that will be applied to the retry delay. /// For example, using a factor of 1000 will make each delay in units of seconds. @@ -46,8 +47,10 @@ pub enum RetryStrategyConfig { /// A retry strategy driven by the fibonacci series. /// Each retry uses a delay which is the sum of the two previous delays. - /// Depending on the problem at hand, a fibonacci retry strategy might perform better and lead to better throughput than the ExponentialBackoff strategy. - /// See "A Performance Comparison of Different Backoff Algorithms under Different Rebroadcast Probabilities for MANETs." for more details. + /// Depending on the problem at hand, a fibonacci retry strategy might perform better and lead + /// to better throughput than the ExponentialBackoff strategy. See "A Performance Comparison of + /// Different Backoff Algorithms under Different Rebroadcast Probabilities for MANETs." for + /// more details. FibonacciBackoff { /// Initial base duration in milliseconds. initial: u64, @@ -105,8 +108,10 @@ pub struct RpcClientConfig { /// JSON-RPC max concurrent requests (default is 256). pub rpc_max_concurrent_requests: usize, - /// JSON-RPC max buffer capacity for each subscription; when the capacity is exceeded the subscription will be dropped (default is 1024). - /// You may prevent the subscription from being dropped by polling often enough Subscription::next() such that it can keep with the rate as server produces new items on the subscription. + /// JSON-RPC max buffer capacity for each subscription; when the capacity is exceeded the + /// subscription will be dropped (default is 1024). You may prevent the subscription from being + /// dropped by polling often enough Subscription::next() such that it can keep with the rate as + /// server produces new items on the subscription. pub rpc_max_buffer_capacity_per_subscription: NonZeroUsize, /// JSON-RPC request object id data type. (default is IdKind::Number) @@ -121,9 +126,11 @@ pub struct RpcClientConfig { /// /// Periodically submitting pings at a defined interval has mainly two benefits: /// - Directly, it acts as a "keep-alive" alternative in the WebSocket world. - /// - Indirectly by inspecting debug logs, it ensures that the endpoint is still responding to messages. + /// - Indirectly by inspecting debug logs, it ensures that the endpoint is still responding to + /// messages. /// - /// The underlying implementation does not make any assumptions about at which intervals pongs are received. 
+ /// The underlying implementation does not make any assumptions about at which intervals pongs + /// are received. /// /// Note: The interval duration is restarted when /// - a frontend command is submitted @@ -168,7 +175,7 @@ impl Default for RpcClientConfig { impl From<&RpcClientConfig> for ClientBuilder { fn from(config: &RpcClientConfig) -> Self { - let mut builder = ClientBuilder::new() + let mut builder = Self::new() .request_timeout(config.rpc_request_timeout) .max_concurrent_requests(config.rpc_max_concurrent_requests) .max_buffer_capacity_per_subscription( @@ -185,8 +192,9 @@ impl From<&RpcClientConfig> for ClientBuilder { impl From<&RpcClientConfig> for WsTransportClientBuilder { fn from(config: &RpcClientConfig) -> Self { - let message_size = config.max_message_size.unwrap_or(TEN_MB_SIZE_BYTES) as u32; - let mut builder = WsTransportClientBuilder::default() + let message_size = + u32::try_from(config.max_message_size.unwrap_or(TEN_MB_SIZE_BYTES)).unwrap_or(u32::MAX); + let mut builder = Self::default() .max_request_size(message_size) .max_response_size(message_size) .max_redirections(5); diff --git a/rosetta-server/src/ws/error.rs b/rosetta-server/src/ws/error.rs index 5f435274..4f20ff41 100644 --- a/rosetta-server/src/ws/error.rs +++ b/rosetta-server/src/ws/error.rs @@ -1,8 +1,8 @@ use jsonrpsee::{core::Error as JsonRpseeError, types::InvalidRequestId}; /// A version of [`jsonrpsee::core::Error`] that implements [`core::clone::Clone`] trait. -/// Cloning the error is necessary when using [`futures_util::future::Shared`]. if a reconnect fails, the error must be cloned and -/// send back to all pending requests. +/// Cloning the error is necessary when using [`futures_util::future::Shared`]. If a reconnect +/// fails, the error must be cloned and sent back to all pending requests.
/// /// See [`super::reconnect_impl::ReconnectFuture`] and [`super::reconnect_impl::ReadyOrWaitFuture`] #[allow(dead_code)] @@ -37,46 +37,46 @@ impl Clone for CloneableError { JsonRpseeError::Call(call) => JsonRpseeError::Call(call.clone()), JsonRpseeError::Transport(error) => { JsonRpseeError::Transport(anyhow::format_err!("{error:?}")) - } + }, JsonRpseeError::InvalidResponse(error) => { JsonRpseeError::InvalidResponse(error.clone()) - } + }, JsonRpseeError::RestartNeeded(reason) => JsonRpseeError::RestartNeeded(reason.clone()), - JsonRpseeError::ParseError(error) => JsonRpseeError::Custom(format!("{error:?}")), // TODO: return an parser error instead a custom error + JsonRpseeError::ParseError(error) => JsonRpseeError::Custom(format!("{error:?}")), /* TODO: return an parser error instead a custom error */ JsonRpseeError::InvalidSubscriptionId => JsonRpseeError::InvalidSubscriptionId, JsonRpseeError::InvalidRequestId(error) => { JsonRpseeError::InvalidRequestId(match error { InvalidRequestId::Invalid(message) => { InvalidRequestId::Invalid(message.clone()) - } + }, InvalidRequestId::NotPendingRequest(message) => { InvalidRequestId::NotPendingRequest(message.clone()) - } + }, InvalidRequestId::Occupied(message) => { InvalidRequestId::Occupied(message.clone()) - } + }, }) - } + }, JsonRpseeError::UnregisteredNotification(error) => { JsonRpseeError::UnregisteredNotification(error.clone()) - } + }, JsonRpseeError::DuplicateRequestId => JsonRpseeError::DuplicateRequestId, JsonRpseeError::MethodAlreadyRegistered(method) => { JsonRpseeError::MethodAlreadyRegistered(method.clone()) - } + }, JsonRpseeError::MethodNotFound(method) => { JsonRpseeError::MethodNotFound(method.clone()) - } + }, JsonRpseeError::SubscriptionNameConflict(name) => { JsonRpseeError::SubscriptionNameConflict(name.clone()) - } + }, JsonRpseeError::RequestTimeout => JsonRpseeError::RequestTimeout, JsonRpseeError::MaxSlotsExceeded => JsonRpseeError::MaxSlotsExceeded, JsonRpseeError::AlreadyStopped => JsonRpseeError::AlreadyStopped, JsonRpseeError::EmptyAllowList(list) => JsonRpseeError::EmptyAllowList(list), JsonRpseeError::HttpHeaderRejected(header, value) => { JsonRpseeError::HttpHeaderRejected(header, value.to_string()) - } + }, JsonRpseeError::Custom(message) => JsonRpseeError::Custom(message.clone()), JsonRpseeError::HttpNotImplemented => JsonRpseeError::HttpNotImplemented, JsonRpseeError::EmptyBatchRequest => JsonRpseeError::EmptyBatchRequest, diff --git a/rosetta-server/src/ws/jsonrpsee_client.rs b/rosetta-server/src/ws/jsonrpsee_client.rs index 32604a9f..4f06d55b 100644 --- a/rosetta-server/src/ws/jsonrpsee_client.rs +++ b/rosetta-server/src/ws/jsonrpsee_client.rs @@ -69,13 +69,11 @@ where let id = match stream.kind() { SubscriptionKind::Subscription(SubscriptionId::Str(id)) => { Some(id.clone().into_owned()) - } + }, _ => None, }; - let stream = stream - .map_err(|e| RpcError::ClientError(Box::new(e))) - .boxed(); + let stream = stream.map_err(|e| RpcError::ClientError(Box::new(e))).boxed(); Ok(RpcSubscription { stream, id }) }) } diff --git a/rosetta-server/src/ws/reconnect.rs b/rosetta-server/src/ws/reconnect.rs index c7088b9b..b325ccbf 100644 --- a/rosetta-server/src/ws/reconnect.rs +++ b/rosetta-server/src/ws/reconnect.rs @@ -1,20 +1,22 @@ use super::jsonrpsee_client::Params as RpcParams; use async_trait::async_trait; -use jsonrpsee::core::client::BatchResponse; -use jsonrpsee::core::params::BatchRequestBuilder; use jsonrpsee::core::{ - client::{ClientT, Subscription, SubscriptionClientT}, + 
client::{BatchResponse, ClientT, Subscription, SubscriptionClientT}, + params::BatchRequestBuilder, traits::ToRpcParams, Error, }; use serde::de::DeserializeOwned; -use std::fmt::{Debug, Display, Formatter}; -use std::future::Future; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicU32, Ordering}; +use std::{ + fmt::{Debug, Display, Formatter}, + future::Future, + ops::{Deref, DerefMut}, + sync::atomic::{AtomicU32, Ordering}, +}; /// Reconnect trait. -/// This trait exposes callbacks which are called when the server returns a RestartNeeded error. +/// This trait exposes callbacks which are called when the server returns a [`Error::RestartNeeded`] +/// error. pub trait Reconnect: 'static + Sized + Send + Sync { type Client: SubscriptionClientT + 'static + Send + Sync; type ClientRef: AsRef + Send + Sync; @@ -39,9 +41,9 @@ pub trait Reconnect: 'static + Sized + Send + Sync { /// Here is the right place to block the requests until the client reconnects. fn ready(&self) -> Self::ReadyFuture<'_>; - /// Callback called when the client returns a RestartNeeded error. + /// Callback called when the client returns a [`Error::RestartNeeded`] error. /// # Params - /// - `client` - The client which returned the RestartNeeded error. + /// - `client` - The client which returned the [`Error::RestartNeeded`] error. fn restart_needed(&self, client: Self::ClientRef) -> Self::RestartNeededFuture<'_>; /// Force reconnect and return a new client. @@ -87,12 +89,10 @@ where let reconnect_count = self.reconnect_count.fetch_add(1, Ordering::SeqCst) + 1; self.span.record("reconnects", reconnect_count); tracing::error!("Reconneting RPC client due error: {reason}"); - Reconnect::restart_needed(&self.client, client) - .await - .map_err(|error| { - tracing::error!("rpc client is unavailable: {error:?}"); - error - }) + Reconnect::restart_needed(&self.client, client).await.map_err(|error| { + tracing::error!("rpc client is unavailable: {error:?}"); + error + }) } } @@ -162,11 +162,11 @@ where Err(Error::RestartNeeded(message)) => { let client = self.restart_needed(message, client).await?; ClientT::notification(client.as_ref(), method, params).await - } + }, Err(error) => { tracing::error!("notification '{method}' failed: {error:?}"); Err(error) - } + }, } } @@ -187,11 +187,11 @@ where Error::RestartNeeded(message) => { let client = self.restart_needed(message, client).await?; ClientT::request::(client.as_ref(), method, params).await - } + }, error => { tracing::error!("rpc request '{method}' failed: {error:?}"); Err(error) - } + }, } } @@ -213,11 +213,11 @@ where Error::RestartNeeded(message) => { let client = self.restart_needed(message, client).await?; ClientT::batch_request(client.as_ref(), batch).await - } + }, error => { tracing::error!("batch request failed: {error:?}"); Err(error) - } + }, } } } @@ -263,11 +263,11 @@ where unsubscribe_method, ) .await - } + }, error => { tracing::error!("subscription to '{subscribe_method}' failed: {error:?}"); Err(error) - } + }, } } @@ -289,11 +289,11 @@ where Error::RestartNeeded(message) => { let client = self.restart_needed(message, client).await?; SubscriptionClientT::subscribe_to_method(client.as_ref(), method).await - } + }, error => { tracing::error!("subscription to '{method}' failed: {error:?}"); Err(error) - } + }, } } } diff --git a/rosetta-server/src/ws/reconnect_impl.rs b/rosetta-server/src/ws/reconnect_impl.rs index af9bea67..954acd08 100644 --- a/rosetta-server/src/ws/reconnect_impl.rs +++ b/rosetta-server/src/ws/reconnect_impl.rs @@ -13,8 +13,7 @@ 
use std::{ num::NonZeroU32, ops::Deref, pin::Pin, - sync::Arc, - sync::RwLock, + sync::{Arc, RwLock}, task::{Context, Poll}, time::Duration, }; @@ -34,8 +33,9 @@ pub trait Config: 'static + Sized + Send + Sync + Debug { /// and a reconnect strategy. /// /// # Example of Retry Strategies: - /// - FixedInterval: A retry is performed in fixed intervals. - /// - Exponential Backoff: The resulting duration is calculated by taking the base to the `n`-th power, + /// - Fixed Interval: A retry is performed in fixed intervals. + /// - Exponential Backoff: The resulting duration is calculated by taking the base to the `n`-th + /// power, /// where `n` denotes the number of past attempts. fn retry_strategy(&self) -> Self::RetryStrategy; @@ -52,7 +52,7 @@ pub trait Config: 'static + Sized + Send + Sync + Debug { /// The default reconnect strategy. /// /// This strategy will reconnect the client using the following algorithm: -/// - When the client returns a RestartNeeded error, the strategy will try to reconnect +/// - When the client returns a [`Error::RestartNeeded`] error, the strategy will try to reconnect /// - Thread-safety: one single reconnect attempt is allowed at the same time /// - After reconnecting, this strategy will retry to process the request /// - While reconnecting, this strategy will hold all the requests until the reconnect finishes @@ -70,9 +70,7 @@ pub struct DefaultStrategy { impl Clone for DefaultStrategy { fn clone(&self) -> Self { - Self { - inner: Arc::clone(&self.inner), - } + Self { inner: Arc::clone(&self.inner) } } } @@ -91,27 +89,27 @@ impl DefaultStrategy { self.inner.clone() } - /// Creates a future that is immediately ready if the client is idle. or pending if reconnecting. + /// Creates a future that is immediately ready if the client is idle. or pending if + /// reconnecting. pub fn acquire_client(&self) -> ReadyOrWaitFuture { - let guard = match self.inner.connection_status.read() { - Ok(guard) => guard, + let connection_status = match self.inner.connection_status.read() { + Ok(guard) => guard.deref().clone(), Err(error) => { return ReadyOrWaitFuture::ready(Err(Error::Custom(format!( "FATAL ERROR, client lock was poisoned: {error}" - )))); - } + )))) + }, }; - match guard.deref().clone() { + match connection_status { ConnectionStatus::Ready(client) => ReadyOrWaitFuture::ready(Ok(client)), ConnectionStatus::Reconnecting(future) => { ReadyOrWaitFuture::::wait(self.inner.config.max_pending_delay(), future) - } + }, } } - /// Creates a future that reconnects the client, the reconnect only works when the provided - /// client_id is greater than the current client id, this is a mechanism for avoid racing conditions. 
+ /// Creates a future that reconnects or waits for the client reconnects pub fn reconnect_or_wait(&self) -> ReadyOrWaitFuture { // Acquire write lock, making sure only one thread is handling the reconnect let mut guard = match self.inner.connection_status.write() { @@ -119,12 +117,12 @@ impl DefaultStrategy { Err(error) => { return ReadyOrWaitFuture::ready(Err(Error::Custom(format!( "FATAL ERROR, client lock was poisoned: {error}" - )))); - } + )))) + }, }; // If the client is already reconnecting, reuse the same future - if let ConnectionStatus::Reconnecting(future) = guard.deref() { + if let ConnectionStatus::Reconnecting(future) = &*guard { return ReadyOrWaitFuture::wait(self.inner.config.max_pending_delay(), future.clone()); }; @@ -132,7 +130,7 @@ impl DefaultStrategy { // TODO: Reconnect in another task/thread let reconnect_future = ReconnectFuture::new(self.inner.clone()).shared(); *guard = ConnectionStatus::Reconnecting(reconnect_future.clone()); - + drop(guard); ReadyOrWaitFuture::wait(self.inner.config.max_pending_delay(), reconnect_future) } } @@ -198,17 +196,13 @@ pub struct ReadyOrWaitFuture { } impl ReadyOrWaitFuture { - pub fn ready(result: Result, Error>) -> Self { - Self { - state: Some(ReadyOrWaitState::Ready(result)), - } + pub const fn ready(result: Result, Error>) -> Self { + Self { state: Some(ReadyOrWaitState::Ready(result)) } } pub fn wait(timeout: Duration, future: Shared>) -> Self { let future = futures_util::future::select(Delay::new(timeout), future); - Self { - state: Some(ReadyOrWaitState::Waiting(future)), - } + Self { state: Some(ReadyOrWaitState::Waiting(future)) } } } @@ -233,22 +227,22 @@ impl Future for ReadyOrWaitFuture { return Poll::Ready(Err(Error::Custom( "Timeout: cannot process request, client reconnecting..." .to_string(), - ))); - } + ))) + }, // The client was reconnected! 
Poll::Ready(Either::Right((Ok(client), _))) => { - return Poll::Ready(Ok(client)); - } + return Poll::Ready(Ok(client)) + }, // Failed to reconnect Poll::Ready(Either::Right((Err(result), _))) => { - return Poll::Ready(Err(result.into_inner())); - } + return Poll::Ready(Err(result.into_inner())) + }, Poll::Pending => { *this.state = Some(ReadyOrWaitState::Waiting(future)); return Poll::Pending; - } + }, } - } + }, None => panic!("ClientReadyFuture polled after completion"), } } @@ -267,9 +261,10 @@ pub enum ReconnectStateMachine { /// 3 - The next retry attempt will be in 3 seconds /// /// # State Transitions - /// 1 - [`ReconnectStateMachine::Reconnecting`] if the reconnect attempt takes longer than the retry delay - /// 2 - [`ReconnectStateMachine::Failure`] if reconnect fails before the retry delay, the delay is passed as parameter - /// 3 - [`ReconnectStateMachine::Success`] if reconnect succeeds + /// 1 - [`ReconnectStateMachine::Reconnecting`] if the reconnect attempt takes longer than the + /// retry delay 2 - [`ReconnectStateMachine::Failure`] if reconnect fails before the retry + /// delay, the delay is passed as parameter 3 - [`ReconnectStateMachine::Success`] if reconnect + /// succeeds ReconnectAndWaitDelay(Select), /// Waiting for reconnecting to complete, retry immediately if fails @@ -298,16 +293,14 @@ pub enum ReconnectStateMachine { /// # State Transitions /// 1 - [`ReconnectStateMachine::Retry`] if no delay is provided /// 2 - [`ReconnectStateMachine::Waiting`] if a delay is provided - Failure { - error: Error, - maybe_delay: Option, - }, + Failure { error: Error, maybe_delay: Option }, /// Retrying to connect /// /// # State Transitions - /// 1 - if retry_strategy.next() is Some(delay), transition to [`ReconnectStateMachine::ReconnectAndWaitDelay`] - /// 2 - if retry_strategy.next() is None, transition to [`ReconnectStateMachine::Reconnecting`] + /// 1 - if retry_strategy.next() is Some(delay), transition to + /// [`ReconnectStateMachine::ReconnectAndWaitDelay`] 2 - if retry_strategy.next() is None, + /// transition to [`ReconnectStateMachine::Reconnecting`] Retry, /// The connection was reestablished successfully @@ -316,25 +309,22 @@ pub enum ReconnectStateMachine { /// Update the ConnectionStatus on the [`SharedState`] and return the client /// /// # State Transitions - /// This state is final, may return an error if the `connection_status` at [`SharedState`] was poisoned + /// This state is final, may return an error if the `connection_status` at [`SharedState`] was + /// poisoned Success(T::Client), } impl Debug for ReconnectStateMachine { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - ReconnectStateMachine::ReconnectAndWaitDelay(_) => { + Self::ReconnectAndWaitDelay(_) => { f.debug_struct("ReconnectStateMachine::ReconnectAndWaitDelay") - } - ReconnectStateMachine::Reconnecting(_) => { - f.debug_struct("ReconnectStateMachine::Reconnecting") - } - ReconnectStateMachine::Waiting(_) => f.debug_struct("ReconnectStateMachine::Waiting"), - ReconnectStateMachine::Failure { .. } => { - f.debug_struct("ReconnectStateMachine::Failure") - } - ReconnectStateMachine::Retry => f.debug_struct("ReconnectStateMachine::Retry"), - ReconnectStateMachine::Success(_) => f.debug_struct("ReconnectStateMachine::Success"), + }, + Self::Reconnecting(_) => f.debug_struct("ReconnectStateMachine::Reconnecting"), + Self::Waiting(_) => f.debug_struct("ReconnectStateMachine::Waiting"), + Self::Failure { .. 
} => f.debug_struct("ReconnectStateMachine::Failure"), + Self::Retry => f.debug_struct("ReconnectStateMachine::Retry"), + Self::Success(_) => f.debug_struct("ReconnectStateMachine::Success"), } .finish() } @@ -362,9 +352,10 @@ impl ReconnectFuture { let delay = Delay::new(delay); let future = futures_util::future::select(delay, reconnect); ReconnectStateMachine::ReconnectAndWaitDelay(future) - } + }, }; + #[allow(clippy::expect_used)] let attempt = NonZeroU32::new(1).expect("non zero; qed"); Self { attempt, @@ -379,6 +370,7 @@ impl ReconnectFuture { impl Future for ReconnectFuture { type Output = Result, CloneableError>; + #[allow(clippy::too_many_lines)] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); let _enter = this.span.enter(); @@ -387,12 +379,13 @@ impl Future for ReconnectFuture { // Client is reconnecting and the retry delay is counting Some(ReconnectStateMachine::ReconnectAndWaitDelay(mut future)) => { match future.poll_unpin(cx) { - // Reconnect attempt timeout, wait for reconnect complete and retry to reconnect immediatly + // Reconnect attempt timeout, wait for reconnect complete and retry to + // reconnect immediatly Poll::Ready(Either::Left((_, reconnect_future))) => { *this.state_machine = Some(ReconnectStateMachine::Reconnecting(reconnect_future)); continue; - } + }, // Reconnect attempt failed before the retry timeout. Poll::Ready(Either::Right((Err(error), delay))) => { @@ -401,22 +394,22 @@ impl Future for ReconnectFuture { maybe_delay: Some(delay), }); continue; - } + }, // Reconnect attempt succeeded! Poll::Ready(Either::Right((Ok(client), _))) => { *this.state_machine = Some(ReconnectStateMachine::Success(client)); continue; - } + }, // Pending Poll::Pending => { *this.state_machine = Some(ReconnectStateMachine::ReconnectAndWaitDelay(future)); return Poll::Pending; - } + }, } - } + }, // Retry timeout was reached, now just wait for the reconnect to complete Some(ReconnectStateMachine::Reconnecting(mut future)) => { @@ -425,24 +418,22 @@ impl Future for ReconnectFuture { Poll::Ready(Ok(client)) => { *this.state_machine = Some(ReconnectStateMachine::Success(client)); continue; - } + }, // Reconnect attempt failed, don't need to wait for the next retry Poll::Ready(Err(error)) => { - *this.state_machine = Some(ReconnectStateMachine::Failure { - error, - maybe_delay: None, - }); + *this.state_machine = + Some(ReconnectStateMachine::Failure { error, maybe_delay: None }); continue; - } + }, // Pending Poll::Pending => { *this.state_machine = Some(ReconnectStateMachine::Reconnecting(future)); break; - } + }, } - } + }, // Waiting for next reconnect attempt Some(ReconnectStateMachine::Waiting(mut delay)) => match delay.poll_unpin(cx) { @@ -450,11 +441,11 @@ impl Future for ReconnectFuture { Poll::Ready(_) => { *this.state_machine = Some(ReconnectStateMachine::Retry); continue; - } + }, Poll::Pending => { *this.state_machine = Some(ReconnectStateMachine::Waiting(delay)); break; - } + }, }, // Reconnect attempt failed, retry to reconnect after delay or immediately @@ -464,18 +455,12 @@ impl Future for ReconnectFuture { *this.attempt, error ); - *this.state_machine = match maybe_delay { - Some(delay) => { - // Wait for delay - Some(ReconnectStateMachine::Waiting(delay)) - } - None => { - // Retry immediately - Some(ReconnectStateMachine::Retry) - } - }; + *this.state_machine = maybe_delay.map_or_else( + || Some(ReconnectStateMachine::Retry), + |delay| Some(ReconnectStateMachine::Waiting(delay)), + ); continue; - } + }, // Increment the 
attempt counter and retry to connect Some(ReconnectStateMachine::Retry) => { @@ -487,11 +472,11 @@ impl Future for ReconnectFuture { let delay = Delay::new(delay); let future = futures_util::future::select(delay, reconnect); ReconnectStateMachine::ReconnectAndWaitDelay(future) - } + }, }; *this.state_machine = Some(next_state); continue; - } + }, // Reconnect Succeeded! update the connection status and return the client Some(ReconnectStateMachine::Success(client)) => { @@ -506,10 +491,10 @@ impl Future for ReconnectFuture { return Poll::Ready(Err(CloneableError::from(Error::Custom(format!( "FATAL ERROR: client lock was poisoned: {error}" ))))); - } + }, }; - if let ConnectionStatus::Ready(client) = guard.deref() { + if let ConnectionStatus::Ready(client) = &*guard { tracing::warn!( "Racing condition detected, two reconnects running at the same time" ); @@ -517,8 +502,9 @@ impl Future for ReconnectFuture { } *guard = ConnectionStatus::Ready(client.clone()); + drop(guard); return Poll::Ready(Ok(client)); - } + }, None => panic!("ReconnectFuture polled after completion"), } } diff --git a/rosetta-server/src/ws/retry_strategy.rs b/rosetta-server/src/ws/retry_strategy.rs index 837afe40..d2c855e6 100644 --- a/rosetta-server/src/ws/retry_strategy.rs +++ b/rosetta-server/src/ws/retry_strategy.rs @@ -13,7 +13,8 @@ pub enum RetryStrategy { /// A retry strategy driven by the fibonacci series. /// Each retry uses a delay which is the sum of the two previous delays. - /// Depending on the problem at hand, a fibonacci retry strategy might perform better and lead to better throughput than the ExponentialBackoff strategy. + /// Depending on the problem at hand, a fibonacci retry strategy might perform better and lead + /// to better throughput than the ExponentialBackoff strategy. 
FibonacciBackoff(FibonacciBackoff), } @@ -33,12 +34,8 @@ impl From<&RetryStrategyConfig> for RetryStrategy { match config { RetryStrategyConfig::FixedInterval(duration) => { Self::FixedInterval(FixedInterval::new(*duration)) - } - RetryStrategyConfig::ExponentialBackoff { - base, - factor, - max_delay, - } => { + }, + RetryStrategyConfig::ExponentialBackoff { base, factor, max_delay } => { let mut exponential_backoff = ExponentialBackoff::from_millis(*base); if let Some(factor) = factor.as_ref() { exponential_backoff = exponential_backoff.factor(*factor); @@ -47,12 +44,8 @@ impl From<&RetryStrategyConfig> for RetryStrategy { exponential_backoff = exponential_backoff.max_delay(*max_delay); } Self::ExponentialBackoff(exponential_backoff) - } - RetryStrategyConfig::FibonacciBackoff { - initial, - factor, - max_delay, - } => { + }, + RetryStrategyConfig::FibonacciBackoff { initial, factor, max_delay } => { let mut fibonacci_backoff = FibonacciBackoff::from_millis(*initial); if let Some(factor) = factor.as_ref() { fibonacci_backoff = fibonacci_backoff.factor(*factor); @@ -61,7 +54,7 @@ impl From<&RetryStrategyConfig> for RetryStrategy { fibonacci_backoff = fibonacci_backoff.max_delay(*max_delay); } Self::FibonacciBackoff(fibonacci_backoff) - } + }, } } } diff --git a/rosetta-server/src/ws/tungstenite_jsonrpsee.rs b/rosetta-server/src/ws/tungstenite_jsonrpsee.rs index f044e0d2..76979768 100644 --- a/rosetta-server/src/ws/tungstenite_jsonrpsee.rs +++ b/rosetta-server/src/ws/tungstenite_jsonrpsee.rs @@ -25,18 +25,22 @@ impl From<&RpcClientConfig> for WebSocketConfig { max_message_size: config.max_message_size, max_frame_size: config.max_frame_size, accept_unmasked_frames: config.accept_unmasked_frames, - ..WebSocketConfig::default() + ..Self::default() } } } -/// Tungstenite WebSocket transport for Jsonrpsee. +/// Tungstenite websocket transport for Jsonrpsee. pub struct TungsteniteClient { sender: Sender, receiver: Receiver, } impl TungsteniteClient { + /// Creates a websocket client using the provided `config` and performs the handshake with `url`. + /// + /// # Errors + /// Returns `Err` if the handshake fails pub async fn new(url: Url, config: &RpcClientConfig) -> Result { let config = WebSocketConfig::from(config); let (ws_stream, response) = connect_async_with_config(url, Some(config), false).await?; @@ -46,29 +50,27 @@ impl TungsteniteClient { response.status() ); - let sender = Sender { - inner: send, - max_request_size: config.max_message_size.unwrap_or(usize::MAX), - }; + let sender = + Sender { inner: send, max_request_size: config.max_message_size.unwrap_or(usize::MAX) }; let receiver = Receiver { inner: receive }; Ok(Self { sender, receiver }) } - pub fn split(self) -> (Sender, Receiver) { + pub(crate) fn split(self) -> (Sender, Receiver) { (self.sender, self.receiver) } } -/// Sending end of WebSocket transport. +/// Sending end of websocket transport. #[derive(Debug)] pub struct Sender { inner: SplitSink>, Message>, max_request_size: usize, } -/// Receiving end of WebSocket transport. +/// Receiving end of websocket transport.
#[derive(Debug)] pub struct Receiver { inner: SplitStream>>, @@ -123,9 +125,7 @@ impl TransportReceiverT for Receiver { Message::Text(text) => break Ok(ReceivedMessage::Text(text)), Message::Binary(bytes) => break Ok(ReceivedMessage::Bytes(bytes)), Message::Pong(_) => break Ok(ReceivedMessage::Pong), - Message::Close(_) => {} - Message::Ping(_) => {} - Message::Frame(_) => {} + Message::Close(_) | Message::Ping(_) | Message::Frame(_) => {}, } } } diff --git a/rosetta-types/src/account_balance_request.rs b/rosetta-types/src/account_balance_request.rs index 3ed5ff06..317c2135 100644 --- a/rosetta-types/src/account_balance_request.rs +++ b/rosetta-types/src/account_balance_request.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// AccountBalanceRequest : An AccountBalanceRequest is utilized to make a balance request on the /account/balance endpoint. If the block_identifier is populated, a historical balance query should be performed. - +/// `AccountBalanceRequest` : An `AccountBalanceRequest` is utilized to make a balance request on +/// the /account/balance endpoint. If the `block_identifier` is populated, a historical balance +/// query should be performed. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountBalanceRequest { #[serde(rename = "network_identifier")] @@ -18,22 +19,22 @@ pub struct AccountBalanceRequest { pub account_identifier: crate::AccountIdentifier, #[serde(rename = "block_identifier", skip_serializing_if = "Option::is_none")] pub block_identifier: Option, - /// In some cases, the caller may not want to retrieve all available balances for an AccountIdentifier. If the currencies field is populated, only balances for the specified currencies will be returned. If not populated, all available balances will be returned. + /// In some cases, the caller may not want to retrieve all available balances for an + /// AccountIdentifier. If the currencies field is populated, only balances for the specified + /// currencies will be returned. If not populated, all available balances will be returned. #[serde(rename = "currencies", skip_serializing_if = "Option::is_none")] pub currencies: Option>, } impl AccountBalanceRequest { - /// An AccountBalanceRequest is utilized to make a balance request on the /account/balance endpoint. If the block_identifier is populated, a historical balance query should be performed. - pub fn new( + /// An `AccountBalanceRequest` is utilized to make a balance request on the /account/balance + /// endpoint. If the `block_identifier` is populated, a historical balance query should be + /// performed. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, account_identifier: crate::AccountIdentifier, - ) -> AccountBalanceRequest { - AccountBalanceRequest { - network_identifier, - account_identifier, - block_identifier: None, - currencies: None, - } + ) -> Self { + Self { network_identifier, account_identifier, block_identifier: None, currencies: None } } } diff --git a/rosetta-types/src/account_balance_response.rs b/rosetta-types/src/account_balance_response.rs index e0400472..b4f5599d 100644 --- a/rosetta-types/src/account_balance_response.rs +++ b/rosetta-types/src/account_balance_response.rs @@ -8,8 +8,11 @@ * Generated by: https://openapi-generator.tech */ -/// AccountBalanceResponse : An AccountBalanceResponse is returned on the /account/balance endpoint. 
If an account has a balance for each AccountIdentifier describing it (ex: an ERC-20 token balance on a few smart contracts), an account balance request must be made with each AccountIdentifier. The `coins` field was removed and replaced by by `/account/coins` in `v1.4.7`. - +/// `AccountBalanceResponse` : An `AccountBalanceResponse` is returned on the /account/balance +/// endpoint. If an account has a balance for each `AccountIdentifier` describing it (ex: an ERC-20 +/// token balance on a few smart contracts), an account balance request must be made with each +/// `AccountIdentifier`. The `coins` field was removed and replaced by by `/account/coins` in +/// `v1.4.7`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountBalanceResponse { #[serde(rename = "block_identifier")] @@ -17,21 +20,23 @@ pub struct AccountBalanceResponse { /// A single account may have a balance in multiple currencies. #[serde(rename = "balances")] pub balances: Vec, - /// Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address. + /// Account-based blockchains that utilize a nonce or sequence number should include that + /// number in the metadata. This number could be unique to the identifier or global across the + /// account address. #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option, } impl AccountBalanceResponse { - /// An AccountBalanceResponse is returned on the /account/balance endpoint. If an account has a balance for each AccountIdentifier describing it (ex: an ERC-20 token balance on a few smart contracts), an account balance request must be made with each AccountIdentifier. The `coins` field was removed and replaced by by `/account/coins` in `v1.4.7`. - pub fn new( + /// An `AccountBalanceResponse` is returned on the /account/balance endpoint. If an account has + /// a balance for each `AccountIdentifier` describing it (ex: an ERC-20 token balance on a few + /// smart contracts), an account balance request must be made with each `AccountIdentifier`. + /// The `coins` field was removed and replaced by by `/account/coins` in `v1.4.7`. + #[must_use] + pub const fn new( block_identifier: crate::BlockIdentifier, balances: Vec, - ) -> AccountBalanceResponse { - AccountBalanceResponse { - block_identifier, - balances, - metadata: None, - } + ) -> Self { + Self { block_identifier, balances, metadata: None } } } diff --git a/rosetta-types/src/account_coins_request.rs b/rosetta-types/src/account_coins_request.rs index e5ba9bf1..cdf908e4 100644 --- a/rosetta-types/src/account_coins_request.rs +++ b/rosetta-types/src/account_coins_request.rs @@ -8,34 +8,33 @@ * Generated by: https://openapi-generator.tech */ -/// AccountCoinsRequest : AccountCoinsRequest is utilized to make a request on the /account/coins endpoint. - +/// `AccountCoinsRequest` : `AccountCoinsRequest` is utilized to make a request on the +/// /account/coins endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountCoinsRequest { #[serde(rename = "network_identifier")] pub network_identifier: crate::NetworkIdentifier, #[serde(rename = "account_identifier")] pub account_identifier: crate::AccountIdentifier, - /// Include state from the mempool when looking up an account's unspent coins. Note, using this functionality breaks any guarantee of idempotency. 
+ /// Include state from the mempool when looking up an account's unspent coins. Note, using this + /// functionality breaks any guarantee of idempotency. #[serde(rename = "include_mempool")] pub include_mempool: bool, - /// In some cases, the caller may not want to retrieve coins for all currencies for an AccountIdentifier. If the currencies field is populated, only coins for the specified currencies will be returned. If not populated, all unspent coins will be returned. + /// In some cases, the caller may not want to retrieve coins for all currencies for an + /// AccountIdentifier. If the currencies field is populated, only coins for the specified + /// currencies will be returned. If not populated, all unspent coins will be returned. #[serde(rename = "currencies", skip_serializing_if = "Option::is_none")] pub currencies: Option>, } impl AccountCoinsRequest { - /// AccountCoinsRequest is utilized to make a request on the /account/coins endpoint. - pub fn new( + /// `AccountCoinsRequest` is utilized to make a request on the /account/coins endpoint. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, account_identifier: crate::AccountIdentifier, include_mempool: bool, - ) -> AccountCoinsRequest { - AccountCoinsRequest { - network_identifier, - account_identifier, - include_mempool, - currencies: None, - } + ) -> Self { + Self { network_identifier, account_identifier, include_mempool, currencies: None } } } diff --git a/rosetta-types/src/account_coins_response.rs b/rosetta-types/src/account_coins_response.rs index b095dc63..a89ac5af 100644 --- a/rosetta-types/src/account_coins_response.rs +++ b/rosetta-types/src/account_coins_response.rs @@ -8,30 +8,30 @@ * Generated by: https://openapi-generator.tech */ -/// AccountCoinsResponse : AccountCoinsResponse is returned on the /account/coins endpoint and includes all unspent Coins owned by an AccountIdentifier. - +/// `AccountCoinsResponse` : `AccountCoinsResponse` is returned on the /account/coins endpoint and +/// includes all unspent Coins owned by an `AccountIdentifier`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountCoinsResponse { #[serde(rename = "block_identifier")] pub block_identifier: crate::BlockIdentifier, - /// If a blockchain is UTXO-based, all unspent Coins owned by an account_identifier should be returned alongside the balance. It is highly recommended to populate this field so that users of the Rosetta API implementation don't need to maintain their own indexer to track their UTXOs. + /// If a blockchain is UTXO-based, all unspent Coins owned by an account_identifier should be + /// returned alongside the balance. It is highly recommended to populate this field so that + /// users of the Rosetta API implementation don't need to maintain their own indexer to track + /// their UTXOs. #[serde(rename = "coins")] pub coins: Vec, - /// Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address. + /// Account-based blockchains that utilize a nonce or sequence number should include that + /// number in the metadata. This number could be unique to the identifier or global across the + /// account address. 
#[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option, } impl AccountCoinsResponse { - /// AccountCoinsResponse is returned on the /account/coins endpoint and includes all unspent Coins owned by an AccountIdentifier. - pub fn new( - block_identifier: crate::BlockIdentifier, - coins: Vec, - ) -> AccountCoinsResponse { - AccountCoinsResponse { - block_identifier, - coins, - metadata: None, - } + /// `AccountCoinsResponse` is returned on the /account/coins endpoint and includes all unspent + /// Coins owned by an `AccountIdentifier`. + #[must_use] + pub const fn new(block_identifier: crate::BlockIdentifier, coins: Vec) -> Self { + Self { block_identifier, coins, metadata: None } } } diff --git a/rosetta-types/src/account_faucet_request.rs b/rosetta-types/src/account_faucet_request.rs index 2157d1fe..b91c9528 100644 --- a/rosetta-types/src/account_faucet_request.rs +++ b/rosetta-types/src/account_faucet_request.rs @@ -1,6 +1,6 @@ use crate::{AccountIdentifier, NetworkIdentifier}; -/// AccountFaucetRequest : AccountFaucetRequest is sent for faucet on an account. +/// `AccountFaucetRequest` : `AccountFaucetRequest` is sent for faucet on an account. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountFaucetRequest { #[serde(rename = "network_identifier")] @@ -12,16 +12,13 @@ pub struct AccountFaucetRequest { } impl AccountFaucetRequest { - /// AccountCoinsRequest is utilized to make a request on the /account/coins endpoint. - pub fn new( + /// `AccountCoinsRequest` is utilized to make a request on the /account/coins endpoint. + #[must_use] + pub const fn new( network_identifier: NetworkIdentifier, account_identifier: AccountIdentifier, faucet_parameter: u128, - ) -> AccountFaucetRequest { - AccountFaucetRequest { - network_identifier, - account_identifier, - faucet_parameter, - } + ) -> Self { + Self { network_identifier, account_identifier, faucet_parameter } } } diff --git a/rosetta-types/src/account_identifier.rs b/rosetta-types/src/account_identifier.rs index 0edf161c..03cf4bc7 100644 --- a/rosetta-types/src/account_identifier.rs +++ b/rosetta-types/src/account_identifier.rs @@ -8,27 +8,30 @@ * Generated by: https://openapi-generator.tech */ -/// AccountIdentifier : The account_identifier uniquely identifies an account within a network. All fields in the account_identifier are utilized to determine this uniqueness (including the metadata field, if populated). - +/// `AccountIdentifier` : The `account_identifier` uniquely identifies an account within a network. +/// All fields in the `account_identifier` are utilized to determine this uniqueness (including the +/// metadata field, if populated). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AccountIdentifier { - /// The address may be a cryptographic public key (or some encoding of it) or a provided username. + /// The address may be a cryptographic public key (or some encoding of it) or a provided + /// username. #[serde(rename = "address")] pub address: String, #[serde(rename = "sub_account", skip_serializing_if = "Option::is_none")] pub sub_account: Option, - /// Blockchains that utilize a username model (where the address is not a derivative of a cryptographic public key) should specify the public key(s) owned by the address in metadata. 
+ /// Blockchains that utilize a username model (where the address is not a derivative of a + /// cryptographic public key) should specify the public key(s) owned by the address in + /// metadata. #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option, } impl AccountIdentifier { - /// The account_identifier uniquely identifies an account within a network. All fields in the account_identifier are utilized to determine this uniqueness (including the metadata field, if populated). - pub fn new(address: String) -> AccountIdentifier { - AccountIdentifier { - address, - sub_account: None, - metadata: None, - } + /// The `account_identifier` uniquely identifies an account within a network. All fields in the + /// `account_identifier` are utilized to determine this uniqueness (including the metadata + /// field, if populated). + #[must_use] + pub const fn new(address: String) -> Self { + Self { address, sub_account: None, metadata: None } } } diff --git a/rosetta-types/src/allow.rs b/rosetta-types/src/allow.rs index 2ef7f37e..e638658c 100644 --- a/rosetta-types/src/allow.rs +++ b/rosetta-types/src/allow.rs @@ -8,49 +8,66 @@ * Generated by: https://openapi-generator.tech */ -/// Allow : Allow specifies supported Operation status, Operation types, and all possible error statuses. This Allow object is used by clients to validate the correctness of a Rosetta Server implementation. It is expected that these clients will error if they receive some response that contains any of the above information that is not specified here. - +/// Allow : Allow specifies supported Operation status, Operation types, and all possible error +/// statuses. This Allow object is used by clients to validate the correctness of a Rosetta Server +/// implementation. It is expected that these clients will error if they receive some response that +/// contains any of the above information that is not specified here. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Allow { - /// All Operation.Status this implementation supports. Any status that is returned during parsing that is not listed here will cause client validation to error. + /// All Operation.Status this implementation supports. Any status that is returned during + /// parsing that is not listed here will cause client validation to error. #[serde(rename = "operation_statuses")] pub operation_statuses: Vec, - /// All Operation.Type this implementation supports. Any type that is returned during parsing that is not listed here will cause client validation to error. + /// All Operation.Type this implementation supports. Any type that is returned during parsing + /// that is not listed here will cause client validation to error. #[serde(rename = "operation_types")] pub operation_types: Vec, - /// All Errors that this implementation could return. Any error that is returned during parsing that is not listed here will cause client validation to error. + /// All Errors that this implementation could return. Any error that is returned during parsing + /// that is not listed here will cause client validation to error. #[serde(rename = "errors")] pub errors: Vec, - /// Any Rosetta implementation that supports querying the balance of an account at any height in the past should set this to true. + /// Any Rosetta implementation that supports querying the balance of an account at any height + /// in the past should set this to true. 
#[serde(rename = "historical_balance_lookup")] pub historical_balance_lookup: bool, - /// If populated, `timestamp_start_index` indicates the first block index where block timestamps are considered valid (i.e. all blocks less than `timestamp_start_index` could have invalid timestamps). This is useful when the genesis block (or blocks) of a network have timestamp 0. If not populated, block timestamps are assumed to be valid for all available blocks. - #[serde( - rename = "timestamp_start_index", - skip_serializing_if = "Option::is_none" - )] + /// If populated, `timestamp_start_index` indicates the first block index where block + /// timestamps are considered valid (i.e. all blocks less than `timestamp_start_index` could + /// have invalid timestamps). This is useful when the genesis block (or blocks) of a network + /// have timestamp 0. If not populated, block timestamps are assumed to be valid for all + /// available blocks. + #[serde(rename = "timestamp_start_index", skip_serializing_if = "Option::is_none")] pub timestamp_start_index: Option, - /// All methods that are supported by the /call endpoint. Communicating which parameters should be provided to /call is the responsibility of the implementer (this is en lieu of defining an entire type system and requiring the implementer to define that in Allow). + /// All methods that are supported by the /call endpoint. Communicating which parameters should + /// be provided to /call is the responsibility of the implementer (this is en lieu of defining + /// an entire type system and requiring the implementer to define that in Allow). #[serde(rename = "call_methods")] pub call_methods: Option>, - /// BalanceExemptions is an array of BalanceExemption indicating which account balances could change without a corresponding Operation. BalanceExemptions should be used sparingly as they may introduce significant complexity for integrators that attempt to reconcile all account balance changes. If your implementation relies on any BalanceExemptions, you MUST implement historical balance lookup (the ability to query an account balance at any BlockIdentifier). + /// BalanceExemptions is an array of BalanceExemption indicating which account balances could + /// change without a corresponding Operation. BalanceExemptions should be used sparingly as + /// they may introduce significant complexity for integrators that attempt to reconcile all + /// account balance changes. If your implementation relies on any BalanceExemptions, you MUST + /// implement historical balance lookup (the ability to query an account balance at any + /// BlockIdentifier). #[serde(rename = "balance_exemptions")] pub balance_exemptions: Option>, - /// Any Rosetta implementation that can update an AccountIdentifier's unspent coins based on the contents of the mempool should populate this field as true. If false, requests to `/account/coins` that set `include_mempool` as true will be automatically rejected. + /// Any Rosetta implementation that can update an AccountIdentifier's unspent coins based on + /// the contents of the mempool should populate this field as true. If false, requests to + /// `/account/coins` that set `include_mempool` as true will be automatically rejected. 
#[serde(rename = "mempool_coins")] pub mempool_coins: bool, #[serde(rename = "block_hash_case", skip_serializing_if = "Option::is_none")] pub block_hash_case: Option, - #[serde( - rename = "transaction_hash_case", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "transaction_hash_case", skip_serializing_if = "Option::is_none")] pub transaction_hash_case: Option, } impl Allow { - /// Allow specifies supported Operation status, Operation types, and all possible error statuses. This Allow object is used by clients to validate the correctness of a Rosetta Server implementation. It is expected that these clients will error if they receive some response that contains any of the above information that is not specified here. - pub fn new( + /// Allow specifies supported Operation status, Operation types, and all possible error + /// statuses. This Allow object is used by clients to validate the correctness of a Rosetta + /// Server implementation. It is expected that these clients will error if they receive some + /// response that contains any of the above information that is not specified here. + #[must_use] + pub const fn new( operation_statuses: Vec, operation_types: Vec, errors: Vec, @@ -58,8 +75,8 @@ impl Allow { call_methods: Option>, balance_exemptions: Option>, mempool_coins: bool, - ) -> Allow { - Allow { + ) -> Self { + Self { operation_statuses, operation_types, errors, diff --git a/rosetta-types/src/amount.rs b/rosetta-types/src/amount.rs index 2e91be45..6ca1b336 100644 --- a/rosetta-types/src/amount.rs +++ b/rosetta-types/src/amount.rs @@ -8,11 +8,12 @@ * Generated by: https://openapi-generator.tech */ -/// Amount : Amount is some Value of a Currency. It is considered invalid to specify a Value without a Currency. - +/// Amount : Amount is some Value of a Currency. It is considered invalid to specify a Value without +/// a Currency. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Amount { - /// Value of the transaction in atomic units represented as an arbitrary-sized signed integer. For example, 1 BTC would be represented by a value of 100000000. + /// Value of the transaction in atomic units represented as an arbitrary-sized signed integer. + /// For example, 1 BTC would be represented by a value of 100000000. #[serde(rename = "value")] pub value: String, #[serde(rename = "currency")] @@ -22,12 +23,10 @@ pub struct Amount { } impl Amount { - /// Amount is some Value of a Currency. It is considered invalid to specify a Value without a Currency. - pub fn new(value: String, currency: crate::Currency) -> Amount { - Amount { - value, - currency, - metadata: None, - } + /// Amount is some Value of a Currency. It is considered invalid to specify a Value without a + /// Currency. + #[must_use] + pub const fn new(value: String, currency: crate::Currency) -> Self { + Self { value, currency, metadata: None } } } diff --git a/rosetta-types/src/balance_exemption.rs b/rosetta-types/src/balance_exemption.rs index 10e98357..cbaf9f9f 100644 --- a/rosetta-types/src/balance_exemption.rs +++ b/rosetta-types/src/balance_exemption.rs @@ -8,15 +8,20 @@ * Generated by: https://openapi-generator.tech */ -/// BalanceExemption : BalanceExemption indicates that the balance for an exempt account could change without a corresponding Operation. This typically occurs with staking rewards, vesting balances, and Currencies with a dynamic supply. 
Currently, it is possible to exempt an account from strict reconciliation by SubAccountIdentifier.Address or by Currency. This means that any account with SubAccountIdentifier.Address would be exempt or any balance of a particular Currency would be exempt, respectively. BalanceExemptions should be used sparingly as they may introduce significant complexity for integrators that attempt to reconcile all account balance changes. If your implementation relies on any BalanceExemptions, you MUST implement historical balance lookup (the ability to query an account balance at any BlockIdentifier). - +/// `BalanceExemption` : `BalanceExemption` indicates that the balance for an exempt account could +/// change without a corresponding Operation. This typically occurs with staking rewards, vesting +/// balances, and Currencies with a dynamic supply. Currently, it is possible to exempt an account +/// from strict reconciliation by SubAccountIdentifier.Address or by Currency. This means that any +/// account with SubAccountIdentifier.Address would be exempt or any balance of a particular +/// Currency would be exempt, respectively. `BalanceExemptions` should be used sparingly as they +/// may introduce significant complexity for integrators that attempt to reconcile all account +/// balance changes. If your implementation relies on any `BalanceExemptions`, you MUST implement +/// historical balance lookup (the ability to query an account balance at any `BlockIdentifier`). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BalanceExemption { - /// SubAccountAddress is the SubAccountIdentifier.Address that the BalanceExemption applies to (regardless of the value of SubAccountIdentifier.Metadata). - #[serde( - rename = "sub_account_address", - skip_serializing_if = "Option::is_none" - )] + /// SubAccountAddress is the SubAccountIdentifier.Address that the BalanceExemption applies to + /// (regardless of the value of SubAccountIdentifier.Metadata). + #[serde(rename = "sub_account_address", skip_serializing_if = "Option::is_none")] pub sub_account_address: Option, #[serde(rename = "currency", skip_serializing_if = "Option::is_none")] pub currency: Option, @@ -25,12 +30,18 @@ pub struct BalanceExemption { } impl BalanceExemption { - /// BalanceExemption indicates that the balance for an exempt account could change without a corresponding Operation. This typically occurs with staking rewards, vesting balances, and Currencies with a dynamic supply. Currently, it is possible to exempt an account from strict reconciliation by SubAccountIdentifier.Address or by Currency. This means that any account with SubAccountIdentifier.Address would be exempt or any balance of a particular Currency would be exempt, respectively. BalanceExemptions should be used sparingly as they may introduce significant complexity for integrators that attempt to reconcile all account balance changes. If your implementation relies on any BalanceExemptions, you MUST implement historical balance lookup (the ability to query an account balance at any BlockIdentifier). - pub fn new() -> BalanceExemption { - BalanceExemption { - sub_account_address: None, - currency: None, - exemption_type: None, - } + /// `BalanceExemption` indicates that the balance for an exempt account could change without a + /// corresponding Operation. This typically occurs with staking rewards, vesting balances, and + /// Currencies with a dynamic supply. 
Currently, it is possible to exempt an account from + /// strict reconciliation by SubAccountIdentifier.Address or by Currency. This means that any + /// account with SubAccountIdentifier.Address would be exempt or any balance of a particular + /// Currency would be exempt, respectively. `BalanceExemptions` should be used sparingly as + /// they may introduce significant complexity for integrators that attempt to reconcile all + /// account balance changes. If your implementation relies on any `BalanceExemptions`, you MUST + /// implement historical balance lookup (the ability to query an account balance at any + /// `BlockIdentifier`). + #[must_use] + pub const fn new() -> Self { + Self { sub_account_address: None, currency: None, exemption_type: None } } } diff --git a/rosetta-types/src/block.rs b/rosetta-types/src/block.rs index ed93d515..a033cdd9 100644 --- a/rosetta-types/src/block.rs +++ b/rosetta-types/src/block.rs @@ -8,15 +8,19 @@ * Generated by: https://openapi-generator.tech */ -/// Block : Blocks contain an array of Transactions that occurred at a particular BlockIdentifier. A hard requirement for blocks returned by Rosetta implementations is that they MUST be _inalterable_: once a client has requested and received a block identified by a specific BlockIndentifier, all future calls for that same BlockIdentifier must return the same block contents. - +/// Block : Blocks contain an array of Transactions that occurred at a particular `BlockIdentifier`. +/// A hard requirement for blocks returned by Rosetta implementations is that they MUST be +/// _inalterable_: once a client has requested and received a block identified by a specific +/// `BlockIndentifier`, all future calls for that same `BlockIdentifier` must return the same block +/// contents. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Block { #[serde(rename = "block_identifier")] pub block_identifier: crate::BlockIdentifier, #[serde(rename = "parent_block_identifier")] pub parent_block_identifier: crate::BlockIdentifier, - /// The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in milliseconds because some blockchains produce blocks more often than once a second. + /// The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in + /// milliseconds because some blockchains produce blocks more often than once a second. #[serde(rename = "timestamp")] pub timestamp: i64, #[serde(rename = "transactions")] @@ -26,19 +30,18 @@ pub struct Block { } impl Block { - /// Blocks contain an array of Transactions that occurred at a particular BlockIdentifier. A hard requirement for blocks returned by Rosetta implementations is that they MUST be _inalterable_: once a client has requested and received a block identified by a specific BlockIndentifier, all future calls for that same BlockIdentifier must return the same block contents. - pub fn new( + /// Blocks contain an array of Transactions that occurred at a particular `BlockIdentifier`. A + /// hard requirement for blocks returned by Rosetta implementations is that they MUST be + /// _inalterable_: once a client has requested and received a block identified by a specific + /// `BlockIndentifier`, all future calls for that same `BlockIdentifier` must return the same + /// block contents. 
+ #[must_use] + pub const fn new( block_identifier: crate::BlockIdentifier, parent_block_identifier: crate::BlockIdentifier, timestamp: i64, transactions: Vec, - ) -> Block { - Block { - block_identifier, - parent_block_identifier, - timestamp, - transactions, - metadata: None, - } + ) -> Self { + Self { block_identifier, parent_block_identifier, timestamp, transactions, metadata: None } } } diff --git a/rosetta-types/src/block_event.rs b/rosetta-types/src/block_event.rs index 12d83c8e..0ded5762 100644 --- a/rosetta-types/src/block_event.rs +++ b/rosetta-types/src/block_event.rs @@ -8,11 +8,13 @@ * Generated by: https://openapi-generator.tech */ -/// BlockEvent : BlockEvent represents the addition or removal of a BlockIdentifier from storage. Streaming BlockEvents allows lightweight clients to update their own state without needing to implement their own syncing logic. - +/// `BlockEvent` : `BlockEvent` represents the addition or removal of a `BlockIdentifier` from +/// storage. Streaming `BlockEvents` allows lightweight clients to update their own state without +/// needing to implement their own syncing logic. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockEvent { - /// sequence is the unique identifier of a BlockEvent within the context of a NetworkIdentifier. + /// sequence is the unique identifier of a BlockEvent within the context of a + /// NetworkIdentifier. #[serde(rename = "sequence")] pub sequence: i64, #[serde(rename = "block_identifier")] @@ -22,16 +24,15 @@ pub struct BlockEvent { } impl BlockEvent { - /// BlockEvent represents the addition or removal of a BlockIdentifier from storage. Streaming BlockEvents allows lightweight clients to update their own state without needing to implement their own syncing logic. - pub fn new( + /// `BlockEvent` represents the addition or removal of a `BlockIdentifier` from storage. + /// Streaming `BlockEvents` allows lightweight clients to update their own state without needing + /// to implement their own syncing logic. + #[must_use] + pub const fn new( sequence: i64, block_identifier: crate::BlockIdentifier, r#type: crate::BlockEventType, - ) -> BlockEvent { - BlockEvent { - sequence, - block_identifier, - r#type, - } + ) -> Self { + Self { sequence, block_identifier, r#type } } } diff --git a/rosetta-types/src/block_event_type.rs b/rosetta-types/src/block_event_type.rs index c7bd6134..19c6c5bf 100644 --- a/rosetta-types/src/block_event_type.rs +++ b/rosetta-types/src/block_event_type.rs @@ -8,9 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// BlockEventType : BlockEventType determines if a BlockEvent represents the addition or removal of a block. - -/// BlockEventType determines if a BlockEvent represents the addition or removal of a block. +/// `BlockEventType` : `BlockEventType` determines if a `BlockEvent` represents the addition or +/// removal of a block. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum BlockEventType { #[serde(rename = "block_added")] @@ -29,7 +28,7 @@ impl ToString for BlockEventType { } impl Default for BlockEventType { - fn default() -> BlockEventType { + fn default() -> Self { Self::Added } } diff --git a/rosetta-types/src/block_identifier.rs b/rosetta-types/src/block_identifier.rs index acb27574..56600282 100644 --- a/rosetta-types/src/block_identifier.rs +++ b/rosetta-types/src/block_identifier.rs @@ -8,21 +8,22 @@ * Generated by: https://openapi-generator.tech */ -/// BlockIdentifier : The block_identifier uniquely identifies a block in a particular network. - +/// `BlockIdentifier` : The `block_identifier` uniquely identifies a block in a particular network. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockIdentifier { /// This is also known as the block height. #[serde(rename = "index")] pub index: u64, - /// This should be normalized according to the case specified in the block_hash_case network options. + /// This should be normalized according to the case specified in the block_hash_case network + /// options. #[serde(rename = "hash")] pub hash: String, } impl BlockIdentifier { - /// The block_identifier uniquely identifies a block in a particular network. - pub fn new(index: u64, hash: String) -> BlockIdentifier { - BlockIdentifier { index, hash } + /// The `block_identifier` uniquely identifies a block in a particular network. + #[must_use] + pub const fn new(index: u64, hash: String) -> Self { + Self { index, hash } } } diff --git a/rosetta-types/src/block_request.rs b/rosetta-types/src/block_request.rs index 53a419ba..2dc41918 100644 --- a/rosetta-types/src/block_request.rs +++ b/rosetta-types/src/block_request.rs @@ -8,8 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// BlockRequest : A BlockRequest is utilized to make a block request on the /block endpoint. - +/// `BlockRequest` : A `BlockRequest` is utilized to make a block request on the /block endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockRequest { #[serde(rename = "network_identifier")] @@ -19,14 +18,12 @@ pub struct BlockRequest { } impl BlockRequest { - /// A BlockRequest is utilized to make a block request on the /block endpoint. - pub fn new( + /// A `BlockRequest` is utilized to make a block request on the /block endpoint. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, block_identifier: crate::PartialBlockIdentifier, - ) -> BlockRequest { - BlockRequest { - network_identifier, - block_identifier, - } + ) -> Self { + Self { network_identifier, block_identifier } } } diff --git a/rosetta-types/src/block_response.rs b/rosetta-types/src/block_response.rs index 0a9bde75..10fb0a4e 100644 --- a/rosetta-types/src/block_response.rs +++ b/rosetta-types/src/block_response.rs @@ -8,23 +8,37 @@ * Generated by: https://openapi-generator.tech */ -/// BlockResponse : A BlockResponse includes a fully-populated block or a partially-populated block with a list of other transactions to fetch (other_transactions). As a result of the consensus algorithm of some blockchains, blocks can be omitted (i.e. certain block indices can be skipped). If a query for one of these omitted indices is made, the response should not include a `Block` object. It is VERY important to note that blocks MUST still form a canonical, connected chain of blocks where each block has a unique index. 
In other words, the `PartialBlockIdentifier` of a block after an omitted block should reference the last non-omitted block. - +/// `BlockResponse` : A `BlockResponse` includes a fully-populated block or a partially-populated +/// block with a list of other transactions to fetch (`other_transactions`). As a result of the +/// consensus algorithm of some blockchains, blocks can be omitted (i.e. certain block indices can +/// be skipped). If a query for one of these omitted indices is made, the response should not +/// include a `Block` object. It is VERY important to note that blocks MUST still form a canonical, +/// connected chain of blocks where each block has a unique index. In other words, the +/// `PartialBlockIdentifier` of a block after an omitted block should reference the last non-omitted +/// block. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockResponse { #[serde(rename = "block", skip_serializing_if = "Option::is_none")] pub block: Option, - /// Some blockchains may require additional transactions to be fetched that weren't returned in the block response (ex: block only returns transaction hashes). For blockchains with a lot of transactions in each block, this can be very useful as consumers can concurrently fetch all transactions returned. + /// Some blockchains may require additional transactions to be fetched that weren't returned in + /// the block response (ex: block only returns transaction hashes). For blockchains with a lot + /// of transactions in each block, this can be very useful as consumers can concurrently fetch + /// all transactions returned. #[serde(rename = "other_transactions", skip_serializing_if = "Option::is_none")] pub other_transactions: Option>, } impl BlockResponse { - /// A BlockResponse includes a fully-populated block or a partially-populated block with a list of other transactions to fetch (other_transactions). As a result of the consensus algorithm of some blockchains, blocks can be omitted (i.e. certain block indices can be skipped). If a query for one of these omitted indices is made, the response should not include a `Block` object. It is VERY important to note that blocks MUST still form a canonical, connected chain of blocks where each block has a unique index. In other words, the `PartialBlockIdentifier` of a block after an omitted block should reference the last non-omitted block. - pub fn new() -> BlockResponse { - BlockResponse { - block: None, - other_transactions: None, - } + /// A `BlockResponse` includes a fully-populated block or a partially-populated block with a + /// list of other transactions to fetch (`other_transactions`). As a result of the consensus + /// algorithm of some blockchains, blocks can be omitted (i.e. certain block indices can be + /// skipped). If a query for one of these omitted indices is made, the response should not + /// include a `Block` object. It is VERY important to note that blocks MUST still form a + /// canonical, connected chain of blocks where each block has a unique index. In other words, + /// the `PartialBlockIdentifier` of a block after an omitted block should reference the last + /// non-omitted block. 
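A minimal usage sketch of the `Block`, `BlockIdentifier`, and `BlockResponse` constructors reformatted above, assuming the rosetta-types crate re-exports these types at its root (as the `crate::` paths in the diff suggest); the hash strings and timestamp are placeholder values:

    use rosetta_types::{Block, BlockIdentifier, BlockResponse};

    fn sample_block_response() -> BlockResponse {
        let parent = BlockIdentifier::new(99, "0xparenthash".to_string());
        let current = BlockIdentifier::new(100, "0xblockhash".to_string());
        // Timestamp is milliseconds since the Unix epoch, per the doc comment above.
        let block = Block::new(current, parent, 1_700_000_000_000, vec![]);
        // Fields are public, so the partially-populated case can be built directly.
        BlockResponse { block: Some(block), other_transactions: None }
    }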
+ #[must_use] + pub const fn new() -> Self { + Self { block: None, other_transactions: None } } } diff --git a/rosetta-types/src/block_transaction.rs b/rosetta-types/src/block_transaction.rs index 4c57489a..905ca87a 100644 --- a/rosetta-types/src/block_transaction.rs +++ b/rosetta-types/src/block_transaction.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// BlockTransaction : BlockTransaction contains a populated Transaction and the BlockIdentifier that contains it. - +/// `BlockTransaction` : `BlockTransaction` contains a populated Transaction and the +/// `BlockIdentifier` that contains it. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockTransaction { #[serde(rename = "block_identifier")] @@ -19,14 +19,13 @@ pub struct BlockTransaction { } impl BlockTransaction { - /// BlockTransaction contains a populated Transaction and the BlockIdentifier that contains it. - pub fn new( + /// `BlockTransaction` contains a populated Transaction and the `BlockIdentifier` that contains + /// it. + #[must_use] + pub const fn new( block_identifier: crate::BlockIdentifier, transaction: crate::Transaction, - ) -> BlockTransaction { - BlockTransaction { - block_identifier, - transaction, - } + ) -> Self { + Self { block_identifier, transaction } } } diff --git a/rosetta-types/src/block_transaction_request.rs b/rosetta-types/src/block_transaction_request.rs index 74bac54f..f78ace76 100644 --- a/rosetta-types/src/block_transaction_request.rs +++ b/rosetta-types/src/block_transaction_request.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// BlockTransactionRequest : A BlockTransactionRequest is used to fetch a Transaction included in a block that is not returned in a BlockResponse. - +/// `BlockTransactionRequest` : A `BlockTransactionRequest` is used to fetch a Transaction included +/// in a block that is not returned in a `BlockResponse`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockTransactionRequest { #[serde(rename = "network_identifier")] @@ -21,16 +21,14 @@ pub struct BlockTransactionRequest { } impl BlockTransactionRequest { - /// A BlockTransactionRequest is used to fetch a Transaction included in a block that is not returned in a BlockResponse. - pub fn new( + /// A `BlockTransactionRequest` is used to fetch a Transaction included in a block that is not + /// returned in a `BlockResponse`. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, block_identifier: crate::BlockIdentifier, transaction_identifier: crate::TransactionIdentifier, - ) -> BlockTransactionRequest { - BlockTransactionRequest { - network_identifier, - block_identifier, - transaction_identifier, - } + ) -> Self { + Self { network_identifier, block_identifier, transaction_identifier } } } diff --git a/rosetta-types/src/block_transaction_response.rs b/rosetta-types/src/block_transaction_response.rs index 06355266..a382c968 100644 --- a/rosetta-types/src/block_transaction_response.rs +++ b/rosetta-types/src/block_transaction_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// BlockTransactionResponse : A BlockTransactionResponse contains information about a block transaction. - +/// `BlockTransactionResponse` : A `BlockTransactionResponse` contains information about a block +/// transaction. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct BlockTransactionResponse { #[serde(rename = "transaction")] @@ -17,8 +17,9 @@ pub struct BlockTransactionResponse { } impl BlockTransactionResponse { - /// A BlockTransactionResponse contains information about a block transaction. - pub fn new(transaction: crate::Transaction) -> BlockTransactionResponse { - BlockTransactionResponse { transaction } + /// A `BlockTransactionResponse` contains information about a block transaction. + #[must_use] + pub const fn new(transaction: crate::Transaction) -> Self { + Self { transaction } } } diff --git a/rosetta-types/src/call_request.rs b/rosetta-types/src/call_request.rs index 663c03a0..c20eb126 100644 --- a/rosetta-types/src/call_request.rs +++ b/rosetta-types/src/call_request.rs @@ -8,16 +8,19 @@ * Generated by: https://openapi-generator.tech */ -/// CallRequest : CallRequest is the input to the `/call` endpoint. - +/// `CallRequest` : `CallRequest` is the input to the `/call` endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct CallRequest { #[serde(rename = "network_identifier")] pub network_identifier: crate::NetworkIdentifier, - /// Method is some network-specific procedure call. This method could map to a network-specific RPC endpoint, a method in an SDK generated from a smart contract, or some hybrid of the two. The implementation must define all available methods in the Allow object. However, it is up to the caller to determine which parameters to provide when invoking `/call`. + /// Method is some network-specific procedure call. This method could map to a network-specific + /// RPC endpoint, a method in an SDK generated from a smart contract, or some hybrid of the + /// two. The implementation must define all available methods in the Allow object. However, it + /// is up to the caller to determine which parameters to provide when invoking `/call`. #[serde(rename = "method")] pub method: String, - /// Parameters is some network-specific argument for a method. It is up to the caller to determine which parameters to provide when invoking `/call`. + /// Parameters is some network-specific argument for a method. It is up to the caller to + /// determine which parameters to provide when invoking `/call`. #[serde(rename = "parameters")] pub parameters: serde_json::Value, #[serde(rename = "block_identifier", skip_serializing_if = "Option::is_none")] @@ -25,18 +28,14 @@ pub struct CallRequest { } impl CallRequest { - /// CallRequest is the input to the `/call` endpoint. - pub fn new( + /// `CallRequest` is the input to the `/call` endpoint. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, method: String, parameters: serde_json::Value, block_identifier: Option, - ) -> CallRequest { - CallRequest { - network_identifier, - method, - parameters, - block_identifier, - } + ) -> Self { + Self { network_identifier, method, parameters, block_identifier } } } diff --git a/rosetta-types/src/call_response.rs b/rosetta-types/src/call_response.rs index e34d1966..4574b27e 100644 --- a/rosetta-types/src/call_response.rs +++ b/rosetta-types/src/call_response.rs @@ -8,21 +8,26 @@ * Generated by: https://openapi-generator.tech */ -/// CallResponse : CallResponse contains the result of a `/call` invocation. - +/// `CallResponse` : `CallResponse` contains the result of a `/call` invocation. 
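A short sketch of the `/call` result wrapper described above, again assuming a root re-export of `CallResponse`; the JSON payload is invented for illustration:

    use rosetta_types::CallResponse;
    use serde_json::json;

    fn sample_call_response() -> CallResponse {
        // The result is opaque to Rosetta tooling and left to the caller to decode.
        let result = json!({ "gas_used": "0x5208" });
        // Be conservative: only report idempotency when it is actually guaranteed.
        CallResponse::new(result, false)
    }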
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct CallResponse { - /// Result contains the result of the `/call` invocation. This result will not be inspected or interpreted by Rosetta tooling and is left to the caller to decode. + /// Result contains the result of the `/call` invocation. This result will not be inspected or + /// interpreted by Rosetta tooling and is left to the caller to decode. #[serde(rename = "result")] pub result: serde_json::Value, - /// Idempotent indicates that if `/call` is invoked with the same CallRequest again, at any point in time, it will return the same CallResponse. Integrators may cache the CallResponse if this is set to true to avoid making unnecessary calls to the Rosetta implementation. For this reason, implementers should be very conservative about returning true here or they could cause issues for the caller. + /// Idempotent indicates that if `/call` is invoked with the same CallRequest again, at any + /// point in time, it will return the same CallResponse. Integrators may cache the + /// CallResponse if this is set to true to avoid making unnecessary calls to the Rosetta + /// implementation. For this reason, implementers should be very conservative about returning + /// true here or they could cause issues for the caller. #[serde(rename = "idempotent")] pub idempotent: bool, } impl CallResponse { - /// CallResponse contains the result of a `/call` invocation. - pub fn new(result: serde_json::Value, idempotent: bool) -> CallResponse { - CallResponse { result, idempotent } + /// `CallResponse` contains the result of a `/call` invocation. + #[must_use] + pub const fn new(result: serde_json::Value, idempotent: bool) -> Self { + Self { result, idempotent } } } diff --git a/rosetta-types/src/case.rs b/rosetta-types/src/case.rs index 1ae56178..4695afc4 100644 --- a/rosetta-types/src/case.rs +++ b/rosetta-types/src/case.rs @@ -8,9 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// Case : Case specifies the expected case for strings and hashes. - -/// Case specifies the expected case for strings and hashes. +/// `Case` specifies the expected case for strings and hashes. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Case { #[serde(rename = "upper_case")] @@ -35,7 +33,7 @@ impl ToString for Case { } impl Default for Case { - fn default() -> Case { + fn default() -> Self { Self::UpperCase } } diff --git a/rosetta-types/src/coin.rs b/rosetta-types/src/coin.rs index b0d8f410..beb9f9ae 100644 --- a/rosetta-types/src/coin.rs +++ b/rosetta-types/src/coin.rs @@ -8,8 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// Coin : Coin contains its unique identifier and the amount it represents. - +/// `Coin` contains its unique identifier and the amount it represents. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Coin { #[serde(rename = "coin_identifier")] @@ -19,11 +18,9 @@ pub struct Coin { } impl Coin { - /// Coin contains its unique identifier and the amount it represents. - pub fn new(coin_identifier: crate::CoinIdentifier, amount: crate::Amount) -> Coin { - Coin { - coin_identifier, - amount, - } + /// `Coin` contains its unique identifier and the amount it represents. 
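A small sketch combining the `Coin`, `CoinIdentifier`, `Amount`, and `Currency` constructors touched by this patch, assuming root re-exports; the identifier and values are placeholders:

    use rosetta_types::{Amount, Coin, CoinIdentifier, Currency};

    fn sample_coin() -> Coin {
        // 1 BTC expressed in atomic units (satoshis), per the Amount docs above.
        let amount = Amount::new("100000000".to_string(), Currency::new("BTC".to_string(), 8));
        let id = CoinIdentifier::new("txhash:0".to_string());
        Coin::new(id, amount)
    }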
+ #[must_use] + pub const fn new(coin_identifier: crate::CoinIdentifier, amount: crate::Amount) -> Self { + Self { coin_identifier, amount } } } diff --git a/rosetta-types/src/coin_action.rs b/rosetta-types/src/coin_action.rs index d0f294e5..fa58153a 100644 --- a/rosetta-types/src/coin_action.rs +++ b/rosetta-types/src/coin_action.rs @@ -8,9 +8,11 @@ * Generated by: https://openapi-generator.tech */ -/// CoinAction : CoinActions are different state changes that a Coin can undergo. When a Coin is created, it is coin_created. When a Coin is spent, it is coin_spent. It is assumed that a single Coin cannot be created or spent more than once. - -/// CoinActions are different state changes that a Coin can undergo. When a Coin is created, it is coin_created. When a Coin is spent, it is coin_spent. It is assumed that a single Coin cannot be created or spent more than once. +/// `CoinAction` : `CoinActions` are different state changes that a Coin can undergo. When a Coin is +/// created, it is `coin_created`. When a Coin is spent, it is `coin_spent`. It is assumed that a +/// single Coin cannot be created or spent more than once. `CoinActions` are different state changes +/// that a Coin can undergo. When a Coin is created, it is `coin_created`. When a Coin is spent, it +/// is `coin_spent`. It is assumed that a single Coin cannot be created or spent more than once. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum CoinAction { #[serde(rename = "coin_created")] @@ -29,7 +31,7 @@ impl ToString for CoinAction { } impl Default for CoinAction { - fn default() -> CoinAction { + fn default() -> Self { Self::Created } } diff --git a/rosetta-types/src/coin_change.rs b/rosetta-types/src/coin_change.rs index 09f6849a..c5c49666 100644 --- a/rosetta-types/src/coin_change.rs +++ b/rosetta-types/src/coin_change.rs @@ -8,8 +8,11 @@ * Generated by: https://openapi-generator.tech */ -/// CoinChange : CoinChange is used to represent a change in state of a some coin identified by a coin_identifier. This object is part of the Operation model and must be populated for UTXO-based blockchains. Coincidentally, this abstraction of UTXOs allows for supporting both account-based transfers and UTXO-based transfers on the same blockchain (when a transfer is account-based, don't populate this model). - +/// `CoinChange` : `CoinChange` is used to represent a change in state of a some coin identified by +/// a `coin_identifier`. This object is part of the Operation model and must be populated for +/// UTXO-based blockchains. Coincidentally, this abstraction of UTXOs allows for supporting both +/// account-based transfers and UTXO-based transfers on the same blockchain (when a transfer is +/// account-based, don't populate this model). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct CoinChange { #[serde(rename = "coin_identifier")] @@ -19,14 +22,16 @@ pub struct CoinChange { } impl CoinChange { - /// CoinChange is used to represent a change in state of a some coin identified by a coin_identifier. This object is part of the Operation model and must be populated for UTXO-based blockchains. Coincidentally, this abstraction of UTXOs allows for supporting both account-based transfers and UTXO-based transfers on the same blockchain (when a transfer is account-based, don't populate this model). - pub fn new( + /// `CoinChange` is used to represent a change in state of a some coin identified by a + /// `coin_identifier`. 
This object is part of the Operation model and must be populated for + /// UTXO-based blockchains. Coincidentally, this abstraction of UTXOs allows for supporting + /// both account-based transfers and UTXO-based transfers on the same blockchain (when a + /// transfer is account-based, don't populate this model). + #[must_use] + pub const fn new( coin_identifier: crate::CoinIdentifier, coin_action: crate::CoinAction, - ) -> CoinChange { - CoinChange { - coin_identifier, - coin_action, - } + ) -> Self { + Self { coin_identifier, coin_action } } } diff --git a/rosetta-types/src/coin_identifier.rs b/rosetta-types/src/coin_identifier.rs index f4805b1e..7a35053a 100644 --- a/rosetta-types/src/coin_identifier.rs +++ b/rosetta-types/src/coin_identifier.rs @@ -8,18 +8,19 @@ * Generated by: https://openapi-generator.tech */ -/// CoinIdentifier : CoinIdentifier uniquely identifies a Coin. - +/// `CoinIdentifier` : `CoinIdentifier` uniquely identifies a Coin. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct CoinIdentifier { - /// Identifier should be populated with a globally unique identifier of a Coin. In Bitcoin, this identifier would be transaction_hash:index. + /// Identifier should be populated with a globally unique identifier of a Coin. In Bitcoin, + /// this identifier would be transaction_hash:index. #[serde(rename = "identifier")] pub identifier: String, } impl CoinIdentifier { - /// CoinIdentifier uniquely identifies a Coin. - pub fn new(identifier: String) -> CoinIdentifier { - CoinIdentifier { identifier } + /// `CoinIdentifier` uniquely identifies a Coin. + #[must_use] + pub const fn new(identifier: String) -> Self { + Self { identifier } } } diff --git a/rosetta-types/src/construction_combine_request.rs b/rosetta-types/src/construction_combine_request.rs index d587cc25..69d2f8d9 100644 --- a/rosetta-types/src/construction_combine_request.rs +++ b/rosetta-types/src/construction_combine_request.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionCombineRequest : ConstructionCombineRequest is the input to the `/construction/combine` endpoint. It contains the unsigned transaction blob returned by `/construction/payloads` and all required signatures to create a network transaction. - +/// `ConstructionCombineRequest` : `ConstructionCombineRequest` is the input to the +/// `/construction/combine` endpoint. It contains the unsigned transaction blob returned by +/// `/construction/payloads` and all required signatures to create a network transaction. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionCombineRequest { #[serde(rename = "network_identifier")] @@ -21,16 +22,15 @@ pub struct ConstructionCombineRequest { } impl ConstructionCombineRequest { - /// ConstructionCombineRequest is the input to the `/construction/combine` endpoint. It contains the unsigned transaction blob returned by `/construction/payloads` and all required signatures to create a network transaction. - pub fn new( + /// `ConstructionCombineRequest` is the input to the `/construction/combine` endpoint. It + /// contains the unsigned transaction blob returned by `/construction/payloads` and all required + /// signatures to create a network transaction. 
+ #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, unsigned_transaction: String, signatures: Vec, - ) -> ConstructionCombineRequest { - ConstructionCombineRequest { - network_identifier, - unsigned_transaction, - signatures, - } + ) -> Self { + Self { network_identifier, unsigned_transaction, signatures } } } diff --git a/rosetta-types/src/construction_combine_response.rs b/rosetta-types/src/construction_combine_response.rs index 7a9579cd..af65a0bd 100644 --- a/rosetta-types/src/construction_combine_response.rs +++ b/rosetta-types/src/construction_combine_response.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionCombineResponse : ConstructionCombineResponse is returned by `/construction/combine`. The network payload will be sent directly to the `construction/submit` endpoint. - +/// `ConstructionCombineResponse` : `ConstructionCombineResponse` is returned by +/// `/construction/combine`. The network payload will be sent directly to the `construction/submit` +/// endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionCombineResponse { #[serde(rename = "signed_transaction")] @@ -17,8 +18,10 @@ pub struct ConstructionCombineResponse { } impl ConstructionCombineResponse { - /// ConstructionCombineResponse is returned by `/construction/combine`. The network payload will be sent directly to the `construction/submit` endpoint. - pub fn new(signed_transaction: String) -> ConstructionCombineResponse { - ConstructionCombineResponse { signed_transaction } + /// `ConstructionCombineResponse` is returned by `/construction/combine`. The network payload + /// will be sent directly to the `construction/submit` endpoint. + #[must_use] + pub const fn new(signed_transaction: String) -> Self { + Self { signed_transaction } } } diff --git a/rosetta-types/src/construction_derive_request.rs b/rosetta-types/src/construction_derive_request.rs index 3952f5f6..e5040cda 100644 --- a/rosetta-types/src/construction_derive_request.rs +++ b/rosetta-types/src/construction_derive_request.rs @@ -8,8 +8,11 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionDeriveRequest : ConstructionDeriveRequest is passed to the `/construction/derive` endpoint. Network is provided in the request because some blockchains have different address formats for different networks. Metadata is provided in the request because some blockchains allow for multiple address types (i.e. different address for validators vs normal accounts). - +/// `ConstructionDeriveRequest` : `ConstructionDeriveRequest` is passed to the +/// `/construction/derive` endpoint. Network is provided in the request because some blockchains +/// have different address formats for different networks. Metadata is provided in the request +/// because some blockchains allow for multiple address types (i.e. different address for validators +/// vs normal accounts). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionDeriveRequest { #[serde(rename = "network_identifier")] @@ -21,15 +24,15 @@ pub struct ConstructionDeriveRequest { } impl ConstructionDeriveRequest { - /// ConstructionDeriveRequest is passed to the `/construction/derive` endpoint. Network is provided in the request because some blockchains have different address formats for different networks. Metadata is provided in the request because some blockchains allow for multiple address types (i.e. different address for validators vs normal accounts). 
- pub fn new( + /// `ConstructionDeriveRequest` is passed to the `/construction/derive` endpoint. Network is + /// provided in the request because some blockchains have different address formats for + /// different networks. Metadata is provided in the request because some blockchains allow for + /// multiple address types (i.e. different address for validators vs normal accounts). + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, public_key: crate::PublicKey, - ) -> ConstructionDeriveRequest { - ConstructionDeriveRequest { - network_identifier, - public_key, - metadata: None, - } + ) -> Self { + Self { network_identifier, public_key, metadata: None } } } diff --git a/rosetta-types/src/construction_derive_response.rs b/rosetta-types/src/construction_derive_response.rs index c5584e0f..109948a5 100644 --- a/rosetta-types/src/construction_derive_response.rs +++ b/rosetta-types/src/construction_derive_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionDeriveResponse : ConstructionDeriveResponse is returned by the `/construction/derive` endpoint. - +/// `ConstructionDeriveResponse` : `ConstructionDeriveResponse` is returned by the +/// `/construction/derive` endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionDeriveResponse { /// [DEPRECATED by `account_identifier` in `v1.4.4`] Address in network-specific format. @@ -22,12 +22,9 @@ pub struct ConstructionDeriveResponse { } impl ConstructionDeriveResponse { - /// ConstructionDeriveResponse is returned by the `/construction/derive` endpoint. - pub fn new() -> ConstructionDeriveResponse { - ConstructionDeriveResponse { - address: None, - account_identifier: None, - metadata: None, - } + /// `ConstructionDeriveResponse` is returned by the `/construction/derive` endpoint. + #[must_use] + pub const fn new() -> Self { + Self { address: None, account_identifier: None, metadata: None } } } diff --git a/rosetta-types/src/construction_hash_request.rs b/rosetta-types/src/construction_hash_request.rs index bdafb7e8..5905315b 100644 --- a/rosetta-types/src/construction_hash_request.rs +++ b/rosetta-types/src/construction_hash_request.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionHashRequest : ConstructionHashRequest is the input to the `/construction/hash` endpoint. - +/// `ConstructionHashRequest` : `ConstructionHashRequest` is the input to the `/construction/hash` +/// endpoint. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionHashRequest { #[serde(rename = "network_identifier")] @@ -19,14 +19,12 @@ pub struct ConstructionHashRequest { } impl ConstructionHashRequest { - /// ConstructionHashRequest is the input to the `/construction/hash` endpoint. - pub fn new( + /// `ConstructionHashRequest` is the input to the `/construction/hash` endpoint. 
+ #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, signed_transaction: String, - ) -> ConstructionHashRequest { - ConstructionHashRequest { - network_identifier, - signed_transaction, - } + ) -> Self { + Self { network_identifier, signed_transaction } } } diff --git a/rosetta-types/src/construction_metadata_request.rs b/rosetta-types/src/construction_metadata_request.rs index 8c2e35b7..f57bddd5 100644 --- a/rosetta-types/src/construction_metadata_request.rs +++ b/rosetta-types/src/construction_metadata_request.rs @@ -8,13 +8,19 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionMetadataRequest : A ConstructionMetadataRequest is utilized to get information required to construct a transaction. The Options object used to specify which metadata to return is left purposely unstructured to allow flexibility for implementers. Options is not required in the case that there is network-wide metadata of interest. - +/// `ConstructionMetadataRequest` : A `ConstructionMetadataRequest` is utilized to get information +/// required to construct a transaction. The Options object used to specify which metadata to +/// return is left purposely unstructured to allow flexibility for implementers. Options is not +/// required in the case that there is network-wide metadata of interest. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionMetadataRequest { #[serde(rename = "network_identifier")] pub network_identifier: crate::NetworkIdentifier, - /// Some blockchains require different metadata for different types of transaction construction (ex: delegation versus a transfer). Instead of requiring a blockchain node to return all possible types of metadata for construction (which may require multiple node fetches), the client can populate an options object to limit the metadata returned to only the subset required. + /// Some blockchains require different metadata for different types of transaction construction + /// (ex: delegation versus a transfer). Instead of requiring a blockchain node to return all + /// possible types of metadata for construction (which may require multiple node fetches), the + /// client can populate an options object to limit the metadata returned to only the subset + /// required. #[serde(rename = "options", skip_serializing_if = "Option::is_none")] pub options: Option, @@ -23,12 +29,12 @@ pub struct ConstructionMetadataRequest { } impl ConstructionMetadataRequest { - /// A ConstructionMetadataRequest is utilized to get information required to construct a transaction. The Options object used to specify which metadata to return is left purposely unstructured to allow flexibility for implementers. Options is not required in the case that there is network-wide metadata of interest. - pub fn new(network_identifier: crate::NetworkIdentifier) -> ConstructionMetadataRequest { - ConstructionMetadataRequest { - network_identifier, - options: None, - public_keys: vec![], - } + /// A `ConstructionMetadataRequest` is utilized to get information required to construct a + /// transaction. The Options object used to specify which metadata to return is left purposely + /// unstructured to allow flexibility for implementers. Options is not required in the case that + /// there is network-wide metadata of interest. 
+ #[must_use] + pub const fn new(network_identifier: crate::NetworkIdentifier) -> Self { + Self { network_identifier, options: None, public_keys: vec![] } } } diff --git a/rosetta-types/src/construction_metadata_response.rs b/rosetta-types/src/construction_metadata_response.rs index e549fad6..5622adb9 100644 --- a/rosetta-types/src/construction_metadata_response.rs +++ b/rosetta-types/src/construction_metadata_response.rs @@ -8,8 +8,12 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionMetadataResponse : The ConstructionMetadataResponse returns network-specific metadata used for transaction construction. Optionally, the implementer can return the suggested fee associated with the transaction being constructed. The caller may use this info to adjust the intent of the transaction or to create a transaction with a different account that can pay the suggested fee. Suggested fee is an array in case fee payment must occur in multiple currencies. - +/// `ConstructionMetadataResponse` : The `ConstructionMetadataResponse` returns network-specific +/// metadata used for transaction construction. Optionally, the implementer can return the +/// suggested fee associated with the transaction being constructed. The caller may use this info to +/// adjust the intent of the transaction or to create a transaction with a different account that +/// can pay the suggested fee. Suggested fee is an array in case fee payment must occur in multiple +/// currencies. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionMetadataResponse { #[serde(rename = "metadata")] @@ -19,11 +23,13 @@ pub struct ConstructionMetadataResponse { } impl ConstructionMetadataResponse { - /// The ConstructionMetadataResponse returns network-specific metadata used for transaction construction. Optionally, the implementer can return the suggested fee associated with the transaction being constructed. The caller may use this info to adjust the intent of the transaction or to create a transaction with a different account that can pay the suggested fee. Suggested fee is an array in case fee payment must occur in multiple currencies. - pub fn new(metadata: serde_json::Value) -> ConstructionMetadataResponse { - ConstructionMetadataResponse { - metadata, - suggested_fee: None, - } + /// The `ConstructionMetadataResponse` returns network-specific metadata used for transaction + /// construction. Optionally, the implementer can return the suggested fee associated with the + /// transaction being constructed. The caller may use this info to adjust the intent of the + /// transaction or to create a transaction with a different account that can pay the suggested + /// fee. Suggested fee is an array in case fee payment must occur in multiple currencies. + #[must_use] + pub const fn new(metadata: serde_json::Value) -> Self { + Self { metadata, suggested_fee: None } } } diff --git a/rosetta-types/src/construction_parse_request.rs b/rosetta-types/src/construction_parse_request.rs index 57019327..60d38252 100644 --- a/rosetta-types/src/construction_parse_request.rs +++ b/rosetta-types/src/construction_parse_request.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionParseRequest : ConstructionParseRequest is the input to the `/construction/parse` endpoint. It allows the caller to parse either an unsigned or signed transaction. - +/// `ConstructionParseRequest` : `ConstructionParseRequest` is the input to the +/// `/construction/parse` endpoint. 
It allows the caller to parse either an unsigned or signed +/// transaction. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionParseRequest { #[serde(rename = "network_identifier")] @@ -17,22 +18,21 @@ pub struct ConstructionParseRequest { /// Signed is a boolean indicating whether the transaction is signed. #[serde(rename = "signed")] pub signed: bool, - /// This must be either the unsigned transaction blob returned by `/construction/payloads` or the signed transaction blob returned by `/construction/combine`. + /// This must be either the unsigned transaction blob returned by `/construction/payloads` or + /// the signed transaction blob returned by `/construction/combine`. #[serde(rename = "transaction")] pub transaction: String, } impl ConstructionParseRequest { - /// ConstructionParseRequest is the input to the `/construction/parse` endpoint. It allows the caller to parse either an unsigned or signed transaction. - pub fn new( + /// `ConstructionParseRequest` is the input to the `/construction/parse` endpoint. It allows the + /// caller to parse either an unsigned or signed transaction. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, signed: bool, transaction: String, - ) -> ConstructionParseRequest { - ConstructionParseRequest { - network_identifier, - signed, - transaction, - } + ) -> Self { + Self { network_identifier, signed, transaction } } } diff --git a/rosetta-types/src/construction_parse_response.rs b/rosetta-types/src/construction_parse_response.rs index 2193ccfa..d45e22d9 100644 --- a/rosetta-types/src/construction_parse_response.rs +++ b/rosetta-types/src/construction_parse_response.rs @@ -8,32 +8,29 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionParseResponse : ConstructionParseResponse contains an array of operations that occur in a transaction blob. This should match the array of operations provided to `/construction/preprocess` and `/construction/payloads`. - +/// `ConstructionParseResponse` contains an array of operations that occur in a transaction blob. +/// This should match the array of operations provided to `/construction/preprocess` and +/// `/construction/payloads`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionParseResponse { #[serde(rename = "operations")] pub operations: Vec, - /// [DEPRECATED by `account_identifier_signers` in `v1.4.4`] All signers (addresses) of a particular transaction. If the transaction is unsigned, it should be empty. + /// [DEPRECATED by `account_identifier_signers` in `v1.4.4`] All signers (addresses) of a + /// particular transaction. If the transaction is unsigned, it should be empty. #[serde(rename = "signers", skip_serializing_if = "Option::is_none")] pub signers: Option>, - #[serde( - rename = "account_identifier_signers", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "account_identifier_signers", skip_serializing_if = "Option::is_none")] pub account_identifier_signers: Option>, #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option, } impl ConstructionParseResponse { - /// ConstructionParseResponse contains an array of operations that occur in a transaction blob. This should match the array of operations provided to `/construction/preprocess` and `/construction/payloads`. 
- pub fn new(operations: Vec) -> ConstructionParseResponse { - ConstructionParseResponse { - operations, - signers: None, - account_identifier_signers: None, - metadata: None, - } + /// `ConstructionParseResponse` contains an array of operations that occur in a transaction + /// blob. This should match the array of operations provided to `/construction/preprocess` and + /// `/construction/payloads`. + #[must_use] + pub const fn new(operations: Vec) -> Self { + Self { operations, signers: None, account_identifier_signers: None, metadata: None } } } diff --git a/rosetta-types/src/construction_payloads_request.rs b/rosetta-types/src/construction_payloads_request.rs index 096d6691..ff40a481 100644 --- a/rosetta-types/src/construction_payloads_request.rs +++ b/rosetta-types/src/construction_payloads_request.rs @@ -8,8 +8,10 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionPayloadsRequest : ConstructionPayloadsRequest is the request to `/construction/payloads`. It contains the network, a slice of operations, and arbitrary metadata that was returned by the call to `/construction/metadata`. Optionally, the request can also include an array of PublicKeys associated with the AccountIdentifiers returned in ConstructionPreprocessResponse. - +/// `ConstructionPayloadsRequest` is the request to `/construction/payloads`. It contains the +/// network, a slice of operations, and arbitrary metadata that was returned by the call to +/// `/construction/metadata`. Optionally, the request can also include an array of `PublicKeys` +/// associated with the `AccountIdentifiers` returned in `ConstructionPreprocessResponse`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionPayloadsRequest { #[serde(rename = "network_identifier")] @@ -23,16 +25,15 @@ pub struct ConstructionPayloadsRequest { } impl ConstructionPayloadsRequest { - /// ConstructionPayloadsRequest is the request to `/construction/payloads`. It contains the network, a slice of operations, and arbitrary metadata that was returned by the call to `/construction/metadata`. Optionally, the request can also include an array of PublicKeys associated with the AccountIdentifiers returned in ConstructionPreprocessResponse. - pub fn new( + /// `ConstructionPayloadsRequest` is the request to `/construction/payloads`. It contains the + /// network, a slice of operations, and arbitrary metadata that was returned by the call to + /// `/construction/metadata`. Optionally, the request can also include an array of `PublicKeys` + /// associated with the `AccountIdentifiers` returned in `ConstructionPreprocessResponse`. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, operations: Vec, - ) -> ConstructionPayloadsRequest { - ConstructionPayloadsRequest { - network_identifier, - operations, - metadata: None, - public_keys: None, - } + ) -> Self { + Self { network_identifier, operations, metadata: None, public_keys: None } } } diff --git a/rosetta-types/src/construction_payloads_response.rs b/rosetta-types/src/construction_payloads_response.rs index eb88d857..40877e8a 100644 --- a/rosetta-types/src/construction_payloads_response.rs +++ b/rosetta-types/src/construction_payloads_response.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionPayloadsResponse : ConstructionTransactionResponse is returned by `/construction/payloads`. 
It contains an unsigned transaction blob (that is usually needed to construct the a network transaction from a collection of signatures) and an array of payloads that must be signed by the caller. - +/// `ConstructionTransactionResponse` is returned by `/construction/payloads`. It contains an +/// unsigned transaction blob (that is usually needed to construct the a network transaction from a +/// collection of signatures) and an array of payloads that must be signed by the caller. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionPayloadsResponse { #[serde(rename = "unsigned_transaction")] @@ -19,14 +20,11 @@ pub struct ConstructionPayloadsResponse { } impl ConstructionPayloadsResponse { - /// ConstructionTransactionResponse is returned by `/construction/payloads`. It contains an unsigned transaction blob (that is usually needed to construct the a network transaction from a collection of signatures) and an array of payloads that must be signed by the caller. - pub fn new( - unsigned_transaction: String, - payloads: Vec, - ) -> ConstructionPayloadsResponse { - ConstructionPayloadsResponse { - unsigned_transaction, - payloads, - } + /// `ConstructionTransactionResponse` is returned by `/construction/payloads`. It contains an + /// unsigned transaction blob (that is usually needed to construct the a network transaction + /// from a collection of signatures) and an array of payloads that must be signed by the caller. + #[must_use] + pub const fn new(unsigned_transaction: String, payloads: Vec) -> Self { + Self { unsigned_transaction, payloads } } } diff --git a/rosetta-types/src/construction_preprocess_request.rs b/rosetta-types/src/construction_preprocess_request.rs index 9d87b4ce..1f41281e 100644 --- a/rosetta-types/src/construction_preprocess_request.rs +++ b/rosetta-types/src/construction_preprocess_request.rs @@ -8,8 +8,12 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionPreprocessRequest : ConstructionPreprocessRequest is passed to the `/construction/preprocess` endpoint so that a Rosetta implementation can determine which metadata it needs to request for construction. Metadata provided in this object should NEVER be a product of live data (i.e. the caller must follow some network-specific data fetching strategy outside of the Construction API to populate required Metadata). If live data is required for construction, it MUST be fetched in the call to `/construction/metadata`. - +/// `ConstructionPreprocessRequest` is passed to the `/construction/preprocess` endpoint so that a +/// Rosetta implementation can determine which metadata it needs to request for construction. +/// Metadata provided in this object should NEVER be a product of live data (i.e. the caller must +/// follow some network-specific data fetching strategy outside of the Construction API to populate +/// required Metadata). If live data is required for construction, it MUST be fetched in the call to +/// `/construction/metadata`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionPreprocessRequest { #[serde(rename = "network_identifier")] @@ -21,15 +25,17 @@ pub struct ConstructionPreprocessRequest { } impl ConstructionPreprocessRequest { - /// ConstructionPreprocessRequest is passed to the `/construction/preprocess` endpoint so that a Rosetta implementation can determine which metadata it needs to request for construction. Metadata provided in this object should NEVER be a product of live data (i.e. 
the caller must follow some network-specific data fetching strategy outside of the Construction API to populate required Metadata). If live data is required for construction, it MUST be fetched in the call to `/construction/metadata`. - pub fn new( + /// `ConstructionPreprocessRequest` is passed to the `/construction/preprocess` endpoint so that + /// a Rosetta implementation can determine which metadata it needs to request for construction. + /// Metadata provided in this object should NEVER be a product of live data (i.e. the caller + /// must follow some network-specific data fetching strategy outside of the Construction API to + /// populate required Metadata). If live data is required for construction, it MUST be fetched + /// in the call to `/construction/metadata`. + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, operations: Vec, - ) -> ConstructionPreprocessRequest { - ConstructionPreprocessRequest { - network_identifier, - operations, - metadata: None, - } + ) -> Self { + Self { network_identifier, operations, metadata: None } } } diff --git a/rosetta-types/src/construction_preprocess_response.rs b/rosetta-types/src/construction_preprocess_response.rs index 4768fe5b..d1006da2 100644 --- a/rosetta-types/src/construction_preprocess_response.rs +++ b/rosetta-types/src/construction_preprocess_response.rs @@ -8,26 +8,32 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionPreprocessResponse : ConstructionPreprocessResponse contains `options` that will be sent unmodified to `/construction/metadata`. If it is not necessary to make a request to `/construction/metadata`, `options` should be omitted. Some blockchains require the PublicKey of particular AccountIdentifiers to construct a valid transaction. To fetch these PublicKeys, populate `required_public_keys` with the AccountIdentifiers associated with the desired PublicKeys. If it is not necessary to retrieve any PublicKeys for construction, `required_public_keys` should be omitted. - +/// `ConstructionPreprocessResponse` contains `options` that will be sent unmodified to +/// `/construction/metadata`. If it is not necessary to make a request to `/construction/metadata`, +/// `options` should be omitted. Some blockchains require the `PublicKey` of particular +/// `AccountIdentifiers` to construct a valid transaction. To fetch these `PublicKeys`, populate +/// `required_public_keys` with the `AccountIdentifiers` associated with the desired `PublicKeys`. +/// If it is not necessary to retrieve any `PublicKeys` for construction, `required_public_keys` +/// should be omitted. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionPreprocessResponse { /// The options that will be sent directly to `/construction/metadata` by the caller. #[serde(rename = "options", skip_serializing_if = "Option::is_none")] pub options: Option, - #[serde( - rename = "required_public_keys", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "required_public_keys", skip_serializing_if = "Option::is_none")] pub required_public_keys: Option>, } impl ConstructionPreprocessResponse { - /// ConstructionPreprocessResponse contains `options` that will be sent unmodified to `/construction/metadata`. If it is not necessary to make a request to `/construction/metadata`, `options` should be omitted. Some blockchains require the PublicKey of particular AccountIdentifiers to construct a valid transaction. 
To fetch these PublicKeys, populate `required_public_keys` with the AccountIdentifiers associated with the desired PublicKeys. If it is not necessary to retrieve any PublicKeys for construction, `required_public_keys` should be omitted. - pub fn new() -> ConstructionPreprocessResponse { - ConstructionPreprocessResponse { - options: None, - required_public_keys: None, - } + /// `ConstructionPreprocessResponse` contains `options` that will be sent unmodified to + /// `/construction/metadata`. If it is not necessary to make a request to + /// `/construction/metadata`, `options` should be omitted. Some blockchains require the + /// `PublicKey` of particular `AccountIdentifiers` to construct a valid transaction. To fetch + /// these `PublicKeys`, populate `required_public_keys` with the `AccountIdentifiers` associated + /// with the desired `PublicKeys`. If it is not necessary to retrieve any `PublicKeys` for + /// construction, `required_public_keys` should be omitted. + #[must_use] + pub const fn new() -> Self { + Self { options: None, required_public_keys: None } } } diff --git a/rosetta-types/src/construction_submit_request.rs b/rosetta-types/src/construction_submit_request.rs index d58b9386..f4164f92 100644 --- a/rosetta-types/src/construction_submit_request.rs +++ b/rosetta-types/src/construction_submit_request.rs @@ -8,8 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// ConstructionSubmitRequest : The transaction submission request includes a signed transaction. - +/// `ConstructionSubmitRequest` : The transaction submission request includes a signed transaction. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct ConstructionSubmitRequest { #[serde(rename = "network_identifier")] @@ -20,13 +19,11 @@ pub struct ConstructionSubmitRequest { impl ConstructionSubmitRequest { /// The transaction submission request includes a signed transaction. - pub fn new( + #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, signed_transaction: String, - ) -> ConstructionSubmitRequest { - ConstructionSubmitRequest { - network_identifier, - signed_transaction, - } + ) -> Self { + Self { network_identifier, signed_transaction } } } diff --git a/rosetta-types/src/currency.rs b/rosetta-types/src/currency.rs index a52c8f7f..cb0434a7 100644 --- a/rosetta-types/src/currency.rs +++ b/rosetta-types/src/currency.rs @@ -8,28 +8,29 @@ * Generated by: https://openapi-generator.tech */ -/// Currency : Currency is composed of a canonical Symbol and Decimals. This Decimals value is used to convert an Amount.Value from atomic units (Satoshis) to standard units (Bitcoins). - +/// `Currency` is composed of a canonical Symbol and Decimals. This Decimals value is used to +/// convert an Amount.Value from atomic units (Satoshis) to standard units (Bitcoins). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Currency { /// Canonical symbol associated with a currency. #[serde(rename = "symbol")] pub symbol: String, - /// Number of decimal places in the standard unit representation of the amount. For example, BTC has 8 decimals. Note that it is not possible to represent the value of some currency in atomic units that is not base 10. + /// Number of decimal places in the standard unit representation of the amount. For example, + /// BTC has 8 decimals. Note that it is not possible to represent the value of some currency in + /// atomic units that is not base 10. 
#[serde(rename = "decimals")] pub decimals: u32, - /// Any additional information related to the currency itself. For example, it would be useful to populate this object with the contract address of an ERC-20 token. + /// Any additional information related to the currency itself. For example, it would be useful + /// to populate this object with the contract address of an ERC-20 token. #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] pub metadata: Option, } impl Currency { - /// Currency is composed of a canonical Symbol and Decimals. This Decimals value is used to convert an Amount.Value from atomic units (Satoshis) to standard units (Bitcoins). - pub fn new(symbol: String, decimals: u32) -> Currency { - Currency { - symbol, - decimals, - metadata: None, - } + /// `Currency` is composed of a canonical Symbol and Decimals. This Decimals value is used to + /// convert an Amount.Value from atomic units (Satoshis) to standard units (Bitcoins). + #[must_use] + pub const fn new(symbol: String, decimals: u32) -> Self { + Self { symbol, decimals, metadata: None } } } diff --git a/rosetta-types/src/curve_type.rs b/rosetta-types/src/curve_type.rs index bf6c7048..c0314293 100644 --- a/rosetta-types/src/curve_type.rs +++ b/rosetta-types/src/curve_type.rs @@ -8,9 +8,12 @@ * Generated by: https://openapi-generator.tech */ -/// CurveType : CurveType is the type of cryptographic curve associated with a PublicKey. * secp256k1: SEC compressed - `33 bytes` (https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) * secp256r1: SEC compressed - `33 bytes` (https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) * edwards25519: `y (255-bits) || x-sign-bit (1-bit)` - `32 bytes` (https://ed25519.cr.yp.to/ed25519-20110926.pdf) * tweedle: 1st pk : Fq.t (32 bytes) || 2nd pk : Fq.t (32 bytes) (https://github.com/CodaProtocol/coda/blob/develop/rfcs/0038-rosetta-construction-api.md#marshal-keys) * pallas: `x (255 bits) || y-parity-bit (1-bit) - 32 bytes` (https://github.com/zcash/pasta) - -/// CurveType is the type of cryptographic curve associated with a PublicKey. * secp256k1: SEC compressed - `33 bytes` (https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) * secp256r1: SEC compressed - `33 bytes` (https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) * edwards25519: `y (255-bits) || x-sign-bit (1-bit)` - `32 bytes` (https://ed25519.cr.yp.to/ed25519-20110926.pdf) * tweedle: 1st pk : Fq.t (32 bytes) || 2nd pk : Fq.t (32 bytes) (https://github.com/CodaProtocol/coda/blob/develop/rfcs/0038-rosetta-construction-api.md#marshal-keys) * pallas: `x (255 bits) || y-parity-bit (1-bit) - 32 bytes` (https://github.com/zcash/pasta) +/// `CurveType` is the type of cryptographic curve associated with a `PublicKey`. 
+/// * [secp256k1: SEC compressed - `33 bytes`](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) +/// * [secp256r1: SEC compressed - `33 bytes`](https://secg.org/sec1-v2.pdf#subsubsection.2.3.3) +/// * [edwards25519: `y (255-bits) || x-sign-bit (1-bit)` - `32 bytes`](https://ed25519.cr.yp.to/ed25519-20110926.pdf) +/// * [tweedle: 1st pk : Fq.t (32 bytes) || 2nd pk : Fq.t (32 bytes)](https://github.com/CodaProtocol/coda/blob/develop/rfcs/0038-rosetta-construction-api.md#marshal-keys) +/// * [pallas: `x (255 bits) || y-parity-bit (1-bit) - 32 bytes`](https://github.com/zcash/pasta) #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum CurveType { #[serde(rename = "secp256k1")] @@ -41,7 +44,7 @@ impl ToString for CurveType { } impl Default for CurveType { - fn default() -> CurveType { + fn default() -> Self { Self::Secp256k1 } } diff --git a/rosetta-types/src/direction.rs b/rosetta-types/src/direction.rs index 999d2ffa..1253ef8b 100644 --- a/rosetta-types/src/direction.rs +++ b/rosetta-types/src/direction.rs @@ -8,9 +8,10 @@ * Generated by: https://openapi-generator.tech */ -/// Direction : Used by RelatedTransaction to indicate the direction of the relation (i.e. cross-shard/cross-network sends may reference `backward` to an earlier transaction and async execution may reference `forward`). Can be used to indicate if a transaction relation is from child to parent or the reverse. - -/// Used by RelatedTransaction to indicate the direction of the relation (i.e. cross-shard/cross-network sends may reference `backward` to an earlier transaction and async execution may reference `forward`). Can be used to indicate if a transaction relation is from child to parent or the reverse. +/// Used by `RelatedTransaction` to indicate the direction of the relation (i.e. +/// cross-shard/cross-network sends may reference `backward` to an earlier transaction and async +/// execution may reference `forward`). Can be used to indicate if a transaction relation is from +/// child to parent or the reverse. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Direction { #[serde(rename = "forward")] @@ -29,7 +30,7 @@ impl ToString for Direction { } impl Default for Direction { - fn default() -> Direction { + fn default() -> Self { Self::Forward } } diff --git a/rosetta-types/src/error.rs b/rosetta-types/src/error.rs index 745172d1..01bec423 100644 --- a/rosetta-types/src/error.rs +++ b/rosetta-types/src/error.rs @@ -8,37 +8,49 @@ * Generated by: https://openapi-generator.tech */ -/// Error : Instead of utilizing HTTP status codes to describe node errors (which often do not have a good analog), rich errors are returned using this object. Both the code and message fields can be individually used to correctly identify an error. Implementations MUST use unique values for both fields. - +/// `Error`: Instead of utilizing HTTP status codes to describe node errors (which often do not have +/// a good analog), rich errors are returned using this object. Both the code and message fields +/// can be individually used to correctly identify an error. Implementations MUST use unique values +/// for both fields. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Error { - /// Code is a network-specific error code. If desired, this code can be equivalent to an HTTP status code. + /// Code is a network-specific error code. If desired, this code can be equivalent to an HTTP + /// status code. 
#[serde(rename = "code")] pub code: i32, - /// Message is a network-specific error message. The message MUST NOT change for a given code. In particular, this means that any contextual information should be included in the details field. + /// Message is a network-specific error message. The message MUST NOT change for a given code. + /// In particular, this means that any contextual information should be included in the details + /// field. #[serde(rename = "message")] pub message: String, - /// Description allows the implementer to optionally provide additional information about an error. In many cases, the content of this field will be a copy-and-paste from existing developer documentation. Description can ONLY be populated with generic information about a particular type of error. It MUST NOT be populated with information about a particular instantiation of an error (use `details` for this). Whereas the content of Error.Message should stay stable across releases, the content of Error.Description will likely change across releases (as implementers improve error documentation). For this reason, the content in this field is not part of any type assertion (unlike Error.Message). + /// Description allows the implementer to optionally provide additional information about an + /// error. In many cases, the content of this field will be a copy-and-paste from existing + /// developer documentation. Description can ONLY be populated with generic information about + /// a particular type of error. It MUST NOT be populated with information about a particular + /// instantiation of an error (use `details` for this). Whereas the content of Error.Message + /// should stay stable across releases, the content of Error.Description will likely change + /// across releases (as implementers improve error documentation). For this reason, the content + /// in this field is not part of any type assertion (unlike Error.Message). #[serde(rename = "description", skip_serializing_if = "Option::is_none")] pub description: Option, /// An error is retriable if the same request may succeed if submitted again. #[serde(rename = "retriable")] pub retriable: bool, - /// Often times it is useful to return context specific to the request that caused the error (i.e. a sample of the stack trace or impacted account) in addition to the standard error message. + /// Often times it is useful to return context specific to the request that caused the error + /// (i.e. a sample of the stack trace or impacted account) in addition to the standard error + /// message. #[serde(rename = "details", skip_serializing_if = "Option::is_none")] pub details: Option, } impl Error { - /// Instead of utilizing HTTP status codes to describe node errors (which often do not have a good analog), rich errors are returned using this object. Both the code and message fields can be individually used to correctly identify an error. Implementations MUST use unique values for both fields. - pub fn new(code: i32, message: String, retriable: bool) -> Error { - Error { - code, - message, - description: None, - retriable, - details: None, - } + /// Instead of utilizing HTTP status codes to describe node errors (which often do not have a + /// good analog), rich errors are returned using this object. Both the code and message fields + /// can be individually used to correctly identify an error. Implementations MUST use unique + /// values for both fields. 
+ #[must_use] + pub const fn new(code: i32, message: String, retriable: bool) -> Self { + Self { code, message, description: None, retriable, details: None } } } diff --git a/rosetta-types/src/events_blocks_request.rs b/rosetta-types/src/events_blocks_request.rs index 0538b082..8b333d80 100644 --- a/rosetta-types/src/events_blocks_request.rs +++ b/rosetta-types/src/events_blocks_request.rs @@ -8,27 +8,28 @@ * Generated by: https://openapi-generator.tech */ -/// EventsBlocksRequest : EventsBlocksRequest is utilized to fetch a sequence of BlockEvents indicating which blocks were added and removed from storage to reach the current state. - +/// `EventsBlocksRequest` : `EventsBlocksRequest` is utilized to fetch a sequence of `BlockEvents` +/// indicating which blocks were added and removed from storage to reach the current state. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct EventsBlocksRequest { #[serde(rename = "network_identifier")] pub network_identifier: crate::NetworkIdentifier, - /// offset is the offset into the event stream to sync events from. If this field is not populated, we return the limit events backwards from tip. If this is set to 0, we start from the beginning. + /// offset is the offset into the event stream to sync events from. If this field is not + /// populated, we return the limit events backwards from tip. If this is set to 0, we start + /// from the beginning. #[serde(rename = "offset", skip_serializing_if = "Option::is_none")] pub offset: Option, - /// limit is the maximum number of events to fetch in one call. The implementation may return <= limit events. + /// limit is the maximum number of events to fetch in one call. The implementation may return + /// <= limit events. #[serde(rename = "limit", skip_serializing_if = "Option::is_none")] pub limit: Option, } impl EventsBlocksRequest { - /// EventsBlocksRequest is utilized to fetch a sequence of BlockEvents indicating which blocks were added and removed from storage to reach the current state. - pub fn new(network_identifier: crate::NetworkIdentifier) -> EventsBlocksRequest { - EventsBlocksRequest { - network_identifier, - offset: None, - limit: None, - } + /// `EventsBlocksRequest` is utilized to fetch a sequence of `BlockEvents` indicating which + /// blocks were added and removed from storage to reach the current state. + #[must_use] + pub const fn new(network_identifier: crate::NetworkIdentifier) -> Self { + Self { network_identifier, offset: None, limit: None } } } diff --git a/rosetta-types/src/events_blocks_response.rs b/rosetta-types/src/events_blocks_response.rs index 96772e18..fab246c8 100644 --- a/rosetta-types/src/events_blocks_response.rs +++ b/rosetta-types/src/events_blocks_response.rs @@ -8,24 +8,25 @@ * Generated by: https://openapi-generator.tech */ -/// EventsBlocksResponse : EventsBlocksResponse contains an ordered collection of BlockEvents and the max retrievable sequence. - +/// `EventsBlocksResponse` contains an ordered collection of `BlockEvents` and the max retrievable +/// sequence. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct EventsBlocksResponse { /// max_sequence is the maximum available sequence number to fetch. #[serde(rename = "max_sequence")] pub max_sequence: i64, - /// events is an array of BlockEvents indicating the order to add and remove blocks to maintain a canonical view of blockchain state. 
Lightweight clients can use this event stream to update state without implementing their own block syncing logic. + /// events is an array of BlockEvents indicating the order to add and remove blocks to maintain + /// a canonical view of blockchain state. Lightweight clients can use this event stream to + /// update state without implementing their own block syncing logic. #[serde(rename = "events")] pub events: Vec, } impl EventsBlocksResponse { - /// EventsBlocksResponse contains an ordered collection of BlockEvents and the max retrievable sequence. - pub fn new(max_sequence: i64, events: Vec) -> EventsBlocksResponse { - EventsBlocksResponse { - max_sequence, - events, - } + /// `EventsBlocksResponse` contains an ordered collection of `BlockEvents` and the max + /// retrievable sequence. + #[must_use] + pub const fn new(max_sequence: i64, events: Vec) -> Self { + Self { max_sequence, events } } } diff --git a/rosetta-types/src/exemption_type.rs b/rosetta-types/src/exemption_type.rs index 26698516..6201cc32 100644 --- a/rosetta-types/src/exemption_type.rs +++ b/rosetta-types/src/exemption_type.rs @@ -8,9 +8,14 @@ * Generated by: https://openapi-generator.tech */ -/// ExemptionType : ExemptionType is used to indicate if the live balance for an account subject to a BalanceExemption could increase above, decrease below, or equal the computed balance. * greater_or_equal: The live balance may increase above or equal the computed balance. This typically occurs with staking rewards that accrue on each block. * less_or_equal: The live balance may decrease below or equal the computed balance. This typically occurs as balance moves from locked to spendable on a vesting account. * dynamic: The live balance may increase above, decrease below, or equal the computed balance. This typically occurs with tokens that have a dynamic supply. - -/// ExemptionType is used to indicate if the live balance for an account subject to a BalanceExemption could increase above, decrease below, or equal the computed balance. * greater_or_equal: The live balance may increase above or equal the computed balance. This typically occurs with staking rewards that accrue on each block. * less_or_equal: The live balance may decrease below or equal the computed balance. This typically occurs as balance moves from locked to spendable on a vesting account. * dynamic: The live balance may increase above, decrease below, or equal the computed balance. This typically occurs with tokens that have a dynamic supply. +/// `ExemptionType` : `ExemptionType` is used to indicate if the live balance for an account subject +/// to a `BalanceExemption` could increase above, decrease below, or equal the computed balance. * +/// `greater_or_equal`: The live balance may increase above or equal the computed balance. This +/// typically occurs with staking rewards that accrue on each block. * `less_or_equal`: The live +/// balance may decrease below or equal the computed balance. This typically occurs as balance +/// moves from locked to spendable on a vesting account. * dynamic: The live balance may increase +/// above, decrease below, or equal the computed balance. This typically occurs with tokens that +/// have a dynamic supply.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum ExemptionType { #[serde(rename = "greater_or_equal")] @@ -32,7 +37,7 @@ impl ToString for ExemptionType { } impl Default for ExemptionType { - fn default() -> ExemptionType { + fn default() -> Self { Self::GreaterOrEqual } } diff --git a/rosetta-types/src/lib.rs b/rosetta-types/src/lib.rs index 4bc36ac2..10634a96 100644 --- a/rosetta-types/src/lib.rs +++ b/rosetta-types/src/lib.rs @@ -1,3 +1,5 @@ +#![allow(clippy::all)] + #[macro_use] extern crate serde; diff --git a/rosetta-types/src/mempool_response.rs b/rosetta-types/src/mempool_response.rs index e80f7562..64d0a1d8 100644 --- a/rosetta-types/src/mempool_response.rs +++ b/rosetta-types/src/mempool_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// MempoolResponse : A MempoolResponse contains all transaction identifiers in the mempool for a particular network_identifier. - +/// A `MempoolResponse` contains all transaction identifiers in the mempool for a particular +/// `network_identifier`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct MempoolResponse { #[serde(rename = "transaction_identifiers")] @@ -17,10 +17,10 @@ pub struct MempoolResponse { } impl MempoolResponse { - /// A MempoolResponse contains all transaction identifiers in the mempool for a particular network_identifier. - pub fn new(transaction_identifiers: Vec) -> MempoolResponse { - MempoolResponse { - transaction_identifiers, - } + /// A `MempoolResponse` contains all transaction identifiers in the mempool for a particular + /// `network_identifier`. + #[must_use] + pub const fn new(transaction_identifiers: Vec) -> Self { + Self { transaction_identifiers } } } diff --git a/rosetta-types/src/mempool_transaction_request.rs b/rosetta-types/src/mempool_transaction_request.rs index 690a638f..4846a103 100644 --- a/rosetta-types/src/mempool_transaction_request.rs +++ b/rosetta-types/src/mempool_transaction_request.rs @@ -8,8 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// MempoolTransactionRequest : A MempoolTransactionRequest is utilized to retrieve a transaction from the mempool. - +/// A `MempoolTransactionRequest` is utilized to retrieve a transaction from the mempool. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct MempoolTransactionRequest { #[serde(rename = "network_identifier")] @@ -19,14 +18,12 @@ pub struct MempoolTransactionRequest { } impl MempoolTransactionRequest { - /// A MempoolTransactionRequest is utilized to retrieve a transaction from the mempool. - pub fn new( + /// A `MempoolTransactionRequest` is utilized to retrieve a transaction from the mempool.
+ #[must_use] + pub const fn new( network_identifier: crate::NetworkIdentifier, transaction_identifier: crate::TransactionIdentifier, - ) -> MempoolTransactionRequest { - MempoolTransactionRequest { - network_identifier, - transaction_identifier, - } + ) -> Self { + Self { network_identifier, transaction_identifier } } } diff --git a/rosetta-types/src/mempool_transaction_response.rs b/rosetta-types/src/mempool_transaction_response.rs index 21b33cd1..e3940063 100644 --- a/rosetta-types/src/mempool_transaction_response.rs +++ b/rosetta-types/src/mempool_transaction_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// MempoolTransactionResponse : A MempoolTransactionResponse contains an estimate of a mempool transaction. It may not be possible to know the full impact of a transaction in the mempool (ex: fee paid). - +/// A `MempoolTransactionResponse` contains an estimate of a mempool transaction. It may not be +/// possible to know the full impact of a transaction in the mempool (ex: fee paid). #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct MempoolTransactionResponse { #[serde(rename = "transaction")] @@ -19,11 +19,10 @@ pub struct MempoolTransactionResponse { } impl MempoolTransactionResponse { - /// A MempoolTransactionResponse contains an estimate of a mempool transaction. It may not be possible to know the full impact of a transaction in the mempool (ex: fee paid). - pub fn new(transaction: crate::Transaction) -> MempoolTransactionResponse { - MempoolTransactionResponse { - transaction, - metadata: None, - } + /// A `MempoolTransactionResponse` contains an estimate of a mempool transaction. It may not be + /// possible to know the full impact of a transaction in the mempool (ex: fee paid). + #[must_use] + pub const fn new(transaction: crate::Transaction) -> Self { + Self { transaction, metadata: None } } } diff --git a/rosetta-types/src/metadata_request.rs b/rosetta-types/src/metadata_request.rs index 59c46a64..05c77b71 100644 --- a/rosetta-types/src/metadata_request.rs +++ b/rosetta-types/src/metadata_request.rs @@ -8,8 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// MetadataRequest : A MetadataRequest is utilized in any request where the only argument is optional metadata. - +/// A `MetadataRequest` is utilized in any request where the only argument is optional metadata. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct MetadataRequest { #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] @@ -17,8 +16,9 @@ pub struct MetadataRequest { } impl MetadataRequest { - /// A MetadataRequest is utilized in any request where the only argument is optional metadata. - pub fn new() -> MetadataRequest { - MetadataRequest { metadata: None } + /// A `MetadataRequest` is utilized in any request where the only argument is optional metadata. + #[must_use] + pub const fn new() -> Self { + Self { metadata: None } } } diff --git a/rosetta-types/src/network_identifier.rs b/rosetta-types/src/network_identifier.rs index a688eb92..d3da20c0 100644 --- a/rosetta-types/src/network_identifier.rs +++ b/rosetta-types/src/network_identifier.rs @@ -8,29 +8,24 @@ * Generated by: https://openapi-generator.tech */ -/// NetworkIdentifier : The network_identifier specifies which network a particular object is associated with. - +/// The `network_identifier` specifies which network a particular object is associated with. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct NetworkIdentifier { #[serde(rename = "blockchain")] pub blockchain: String, - /// If a blockchain has a specific chain-id or network identifier, it should go in this field. It is up to the client to determine which network-specific identifier is mainnet or testnet. + /// If a blockchain has a specific chain-id or network identifier, it should go in this field. + /// It is up to the client to determine which network-specific identifier is mainnet or + /// testnet. #[serde(rename = "network")] pub network: String, - #[serde( - rename = "sub_network_identifier", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "sub_network_identifier", skip_serializing_if = "Option::is_none")] pub sub_network_identifier: Option, } impl NetworkIdentifier { - /// The network_identifier specifies which network a particular object is associated with. - pub fn new(blockchain: String, network: String) -> NetworkIdentifier { - NetworkIdentifier { - blockchain, - network, - sub_network_identifier: None, - } + /// The `network_identifier` specifies which network a particular object is associated with. + #[must_use] + pub const fn new(blockchain: String, network: String) -> Self { + Self { blockchain, network, sub_network_identifier: None } } } diff --git a/rosetta-types/src/network_list_response.rs b/rosetta-types/src/network_list_response.rs index b6fe612b..48470d39 100644 --- a/rosetta-types/src/network_list_response.rs +++ b/rosetta-types/src/network_list_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// NetworkListResponse : A NetworkListResponse contains all NetworkIdentifiers that the node can serve information for. - +/// A `NetworkListResponse` contains all `NetworkIdentifiers` that the node can serve information +/// for. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct NetworkListResponse { #[serde(rename = "network_identifiers")] @@ -17,10 +17,10 @@ pub struct NetworkListResponse { } impl NetworkListResponse { - /// A NetworkListResponse contains all NetworkIdentifiers that the node can serve information for. - pub fn new(network_identifiers: Vec) -> NetworkListResponse { - NetworkListResponse { - network_identifiers, - } + /// A `NetworkListResponse` contains all `NetworkIdentifiers` that the node can serve + /// information for. + #[must_use] + pub const fn new(network_identifiers: Vec) -> Self { + Self { network_identifiers } } } diff --git a/rosetta-types/src/network_options_response.rs b/rosetta-types/src/network_options_response.rs index 05fde85e..af45801b 100644 --- a/rosetta-types/src/network_options_response.rs +++ b/rosetta-types/src/network_options_response.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// NetworkOptionsResponse : NetworkOptionsResponse contains information about the versioning of the node and the allowed operation statuses, operation types, and errors. - +/// `NetworkOptionsResponse` contains information about the versioning of the node and the allowed +/// operation statuses, operation types, and errors. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct NetworkOptionsResponse { #[serde(rename = "version")] @@ -19,11 +19,10 @@ pub struct NetworkOptionsResponse { } impl NetworkOptionsResponse { - /// NetworkOptionsResponse contains information about the versioning of the node and the allowed operation statuses, operation types, and errors. 
- pub fn new(version: crate::Version) -> NetworkOptionsResponse { - NetworkOptionsResponse { - version, - allow: None, - } + /// `NetworkOptionsResponse` contains information about the versioning of the node and the + /// allowed operation statuses, operation types, and errors. + #[must_use] + pub const fn new(version: crate::Version) -> Self { + Self { version, allow: None } } } diff --git a/rosetta-types/src/network_request.rs b/rosetta-types/src/network_request.rs index f78ac4bb..d4adf22a 100644 --- a/rosetta-types/src/network_request.rs +++ b/rosetta-types/src/network_request.rs @@ -8,8 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// NetworkRequest : A NetworkRequest is utilized to retrieve some data specific exclusively to a NetworkIdentifier. - +/// A `NetworkRequest` is utilized to retrieve some data specific exclusively to a +/// `NetworkIdentifier`. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct NetworkRequest { #[serde(rename = "network_identifier")] @@ -19,11 +19,10 @@ pub struct NetworkRequest { } impl NetworkRequest { - /// A NetworkRequest is utilized to retrieve some data specific exclusively to a NetworkIdentifier. - pub fn new(network_identifier: crate::NetworkIdentifier) -> NetworkRequest { - NetworkRequest { - network_identifier, - metadata: None, - } + /// A `NetworkRequest` is utilized to retrieve some data specific exclusively to a + /// `NetworkIdentifier`. + #[must_use] + pub const fn new(network_identifier: crate::NetworkIdentifier) -> Self { + Self { network_identifier, metadata: None } } } diff --git a/rosetta-types/src/network_status_response.rs b/rosetta-types/src/network_status_response.rs index 1c7e2a26..1947901a 100644 --- a/rosetta-types/src/network_status_response.rs +++ b/rosetta-types/src/network_status_response.rs @@ -8,23 +8,28 @@ * Generated by: https://openapi-generator.tech */ -/// NetworkStatusResponse : NetworkStatusResponse contains basic information about the node's view of a blockchain network. It is assumed that any BlockIdentifier.Index less than or equal to CurrentBlockIdentifier.Index can be queried. If a Rosetta implementation prunes historical state, it should populate the optional `oldest_block_identifier` field with the oldest block available to query. If this is not populated, it is assumed that the `genesis_block_identifier` is the oldest queryable block. If a Rosetta implementation performs some pre-sync before it is possible to query blocks, sync_status should be populated so that clients can still monitor healthiness. Without this field, it may appear that the implementation is stuck syncing and needs to be terminated. - +/// `NetworkStatusResponse` contains basic information about the node's view of a blockchain +/// network. It is assumed that any BlockIdentifier.Index less than or equal to +/// CurrentBlockIdentifier.Index can be queried. If a Rosetta implementation prunes historical +/// state, it should populate the optional `oldest_block_identifier` field with the oldest block +/// available to query. If this is not populated, it is assumed that the `genesis_block_identifier` +/// is the oldest queryable block. If a Rosetta implementation performs some pre-sync before it is +/// possible to query blocks, `sync_status` should be populated so that clients can still monitor +/// healthiness. Without this field, it may appear that the implementation is stuck syncing and +/// needs to be terminated. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct NetworkStatusResponse { #[serde(rename = "current_block_identifier")] pub current_block_identifier: crate::BlockIdentifier, - /// The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in milliseconds because some blockchains produce blocks more often than once a second. + /// The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in + /// milliseconds because some blockchains produce blocks more often than once a second. #[serde(rename = "current_block_timestamp")] pub current_block_timestamp: i64, // TODO: genesis_block_identifier is not nullable. rosetta-ethereum can't determine // it on dev net. #[serde(rename = "genesis_block_identifier")] pub genesis_block_identifier: Option, - #[serde( - rename = "oldest_block_identifier", - skip_serializing_if = "Option::is_none" - )] + #[serde(rename = "oldest_block_identifier", skip_serializing_if = "Option::is_none")] pub oldest_block_identifier: Option, #[serde(rename = "latest_finalized_block_identifier")] pub latest_finalized_block_identifier: crate::BlockIdentifier, @@ -35,14 +40,23 @@ pub struct NetworkStatusResponse { } impl NetworkStatusResponse { - /// NetworkStatusResponse contains basic information about the node's view of a blockchain network. It is assumed that any BlockIdentifier.Index less than or equal to CurrentBlockIdentifier.Index can be queried. If a Rosetta implementation prunes historical state, it should populate the optional `oldest_block_identifier` field with the oldest block available to query. If this is not populated, it is assumed that the `genesis_block_identifier` is the oldest queryable block. If a Rosetta implementation performs some pre-sync before it is possible to query blocks, sync_status should be populated so that clients can still monitor healthiness. Without this field, it may appear that the implementation is stuck syncing and needs to be terminated. - pub fn new( + /// `NetworkStatusResponse` contains basic information about the node's view of a blockchain + /// network. It is assumed that any BlockIdentifier.Index less than or equal to + /// CurrentBlockIdentifier.Index can be queried. If a Rosetta implementation prunes historical + /// state, it should populate the optional `oldest_block_identifier` field with the oldest block + /// available to query. If this is not populated, it is assumed that the + /// `genesis_block_identifier` is the oldest queryable block. If a Rosetta implementation + /// performs some pre-sync before it is possible to query blocks, `sync_status` should be + /// populated so that clients can still monitor healthiness. Without this field, it may appear + /// that the implementation is stuck syncing and needs to be terminated. 
+ #[must_use] + pub const fn new( current_block_identifier: crate::BlockIdentifier, current_block_timestamp: i64, genesis_block_identifier: crate::BlockIdentifier, latest_finalized_block_identifier: crate::BlockIdentifier, - ) -> NetworkStatusResponse { - NetworkStatusResponse { + ) -> Self { + Self { current_block_identifier, current_block_timestamp, genesis_block_identifier: Some(genesis_block_identifier), diff --git a/rosetta-types/src/operation.rs b/rosetta-types/src/operation.rs index 85fdac33..1d4472d1 100644 --- a/rosetta-types/src/operation.rs +++ b/rosetta-types/src/operation.rs @@ -8,19 +8,35 @@ * Generated by: https://openapi-generator.tech */ -/// Operation : Operations contain all balance-changing information within a transaction. They are always one-sided (only affect 1 AccountIdentifier) and can succeed or fail independently from a Transaction. Operations are used both to represent on-chain data (Data API) and to construct new transactions (Construction API), creating a standard interface for reading and writing to blockchains. - +/// `Operation`s contain all balance-changing information within a transaction. They are always +/// one-sided (only affect 1 `AccountIdentifier`) and can succeed or fail independently from a +/// Transaction. Operations are used both to represent on-chain data (Data API) and to construct +/// new transactions (Construction API), creating a standard interface for reading and writing to +/// blockchains. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Operation { #[serde(rename = "operation_identifier")] pub operation_identifier: crate::OperationIdentifier, - /// Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree. + /// Restrict referenced related_operations to identifier indices < the current + /// operation_identifier.index. This ensures there exists a clear DAG-structure of relations. + /// Since operations are one-sided, one could imagine relating operations in a single transfer + /// or linking operations in a call tree. #[serde(rename = "related_operations", skip_serializing_if = "Option::is_none")] pub related_operations: Option>, - /// Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data. + /// Type is the network-specific type of the operation. Ensure that any type that can be + /// returned here is also specified in the NetworkOptionsResponse. This can be very useful to + /// downstream consumers that parse all block data. #[serde(rename = "type")] pub r#type: String, - /// Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed).
However, operations provided during transaction construction (often times called \"intent\" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed). + /// Status is the network-specific status of the operation. Status is not defined on the + /// transaction object because blockchains with smart contracts may have transactions that + /// partially apply (some operations are successful and some are not). Blockchains with atomic + /// transactions (all operations succeed or all operations fail) will have the same status for + /// each operation. On-chain operations (operations retrieved in the `/block` and + /// `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must + /// have succeeded or failed). However, operations provided during transaction construction + /// (often times called \"intent\" in the documentation) MUST NOT have a populated status field + /// (operations yet to be included on-chain have not yet succeeded or failed). #[serde(rename = "status", skip_serializing_if = "Option::is_none")] pub status: Option, #[serde(rename = "account", skip_serializing_if = "Option::is_none")] @@ -34,9 +50,14 @@ pub struct Operation { } impl Operation { - /// Operations contain all balance-changing information within a transaction. They are always one-sided (only affect 1 AccountIdentifier) and can succeed or fail independently from a Transaction. Operations are used both to represent on-chain data (Data API) and to construct new transactions (Construction API), creating a standard interface for reading and writing to blockchains. - pub fn new(operation_identifier: crate::OperationIdentifier, r#type: String) -> Operation { - Operation { + /// Operations contain all balance-changing information within a transaction. They are always + /// one-sided (only affect 1 `AccountIdentifier`) and can succeed or fail independently from a + /// Transaction. Operations are used both to represent on-chain data (Data API) and to + /// construct new transactions (Construction API), creating a standard interface for reading and + /// writing to blockchains. + #[must_use] + pub const fn new(operation_identifier: crate::OperationIdentifier, r#type: String) -> Self { + Self { operation_identifier, related_operations: None, r#type, diff --git a/rosetta-types/src/operation_identifier.rs b/rosetta-types/src/operation_identifier.rs index 53f19690..a10b72f0 100644 --- a/rosetta-types/src/operation_identifier.rs +++ b/rosetta-types/src/operation_identifier.rs @@ -8,24 +8,27 @@ * Generated by: https://openapi-generator.tech */ -/// OperationIdentifier : The operation_identifier uniquely identifies an operation within a transaction. - +/// The `operation_identifier` uniquely identifies an operation within a transaction. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct OperationIdentifier { - /// The operation index is used to ensure each operation has a unique identifier within a transaction. This index is only relative to the transaction and NOT GLOBAL. The operations in each transaction should start from index 0. To clarify, there may not be any notion of an operation index in the blockchain being described. + /// The operation index is used to ensure each operation has a unique identifier within a + /// transaction. This index is only relative to the transaction and NOT GLOBAL. The operations + /// in each transaction should start from index 0. 
To clarify, there may not be any notion of + /// an operation index in the blockchain being described. #[serde(rename = "index")] pub index: i64, - /// Some blockchains specify an operation index that is essential for client use. For example, Bitcoin uses a network_index to identify which UTXO was used in a transaction. network_index should not be populated if there is no notion of an operation index in a blockchain (typically most account-based blockchains). + /// Some blockchains specify an operation index that is essential for client use. For example, + /// Bitcoin uses a network_index to identify which UTXO was used in a transaction. + /// network_index should not be populated if there is no notion of an operation index in a + /// blockchain (typically most account-based blockchains). #[serde(rename = "network_index", skip_serializing_if = "Option::is_none")] pub network_index: Option, } impl OperationIdentifier { - /// The operation_identifier uniquely identifies an operation within a transaction. - pub fn new(index: i64) -> OperationIdentifier { - OperationIdentifier { - index, - network_index: None, - } + /// The `operation_identifier` uniquely identifies an operation within a transaction. + #[must_use] + pub const fn new(index: i64) -> Self { + Self { index, network_index: None } } } diff --git a/rosetta-types/src/operation_status.rs b/rosetta-types/src/operation_status.rs index bdb415fd..cb7c49cc 100644 --- a/rosetta-types/src/operation_status.rs +++ b/rosetta-types/src/operation_status.rs @@ -8,21 +8,27 @@ * Generated by: https://openapi-generator.tech */ -/// OperationStatus : OperationStatus is utilized to indicate which Operation status are considered successful. - +/// `OperationStatus` : `OperationStatus` is utilized to indicate which Operation status are +/// considered successful. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct OperationStatus { /// The status is the network-specific status of the operation. #[serde(rename = "status")] pub status: String, - /// An Operation is considered successful if the Operation.Amount should affect the Operation.Account. Some blockchains (like Bitcoin) only include successful operations in blocks but other blockchains (like Ethereum) include unsuccessful operations that incur a fee. To reconcile the computed balance from the stream of Operations, it is critical to understand which Operation.Status indicate an Operation is successful and should affect an Account. + /// An Operation is considered successful if the Operation.Amount should affect the + /// Operation.Account. Some blockchains (like Bitcoin) only include successful operations in + /// blocks but other blockchains (like Ethereum) include unsuccessful operations that incur a + /// fee. To reconcile the computed balance from the stream of Operations, it is critical to + /// understand which Operation.Status indicate an Operation is successful and should affect an + /// Account. #[serde(rename = "successful")] pub successful: bool, } impl OperationStatus { - /// OperationStatus is utilized to indicate which Operation status are considered successful. - pub fn new(status: String, successful: bool) -> OperationStatus { - OperationStatus { status, successful } + /// `OperationStatus` is utilized to indicate which Operation status are considered successful. 
+ #[must_use] + pub const fn new(status: String, successful: bool) -> Self { + Self { status, successful } } } diff --git a/rosetta-types/src/operator.rs b/rosetta-types/src/operator.rs index 3a8bfd7a..ae40e0d8 100644 --- a/rosetta-types/src/operator.rs +++ b/rosetta-types/src/operator.rs @@ -8,9 +8,8 @@ * Generated by: https://openapi-generator.tech */ -/// Operator : Operator is used by query-related endpoints to determine how to apply conditions. If this field is not populated, the default `and` value will be used. - -/// Operator is used by query-related endpoints to determine how to apply conditions. If this field is not populated, the default `and` value will be used. +/// Operator : Operator is used by query-related endpoints to determine how to apply conditions. If +/// this field is not populated, the default `and` value will be used. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Operator { #[serde(rename = "or")] @@ -29,7 +28,7 @@ impl ToString for Operator { } impl Default for Operator { - fn default() -> Operator { + fn default() -> Self { Self::Or } } diff --git a/rosetta-types/src/partial_block_identifier.rs b/rosetta-types/src/partial_block_identifier.rs index f2ecc13d..fa397012 100644 --- a/rosetta-types/src/partial_block_identifier.rs +++ b/rosetta-types/src/partial_block_identifier.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// PartialBlockIdentifier : When fetching data by BlockIdentifier, it may be possible to only specify the index or hash. If neither property is specified, it is assumed that the client is making a request at the current block. - +/// `PartialBlockIdentifier` : When fetching data by `BlockIdentifier`, it may be possible to only +/// specify the index or hash. If neither property is specified, it is assumed that the client is +/// making a request at the current block. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct PartialBlockIdentifier { #[serde(rename = "index", skip_serializing_if = "Option::is_none")] @@ -19,11 +20,11 @@ pub struct PartialBlockIdentifier { } impl PartialBlockIdentifier { - /// When fetching data by BlockIdentifier, it may be possible to only specify the index or hash. If neither property is specified, it is assumed that the client is making a request at the current block. - pub fn new() -> PartialBlockIdentifier { - PartialBlockIdentifier { - index: None, - hash: None, - } + /// When fetching data by `BlockIdentifier`, it may be possible to only specify the index or + /// hash. If neither property is specified, it is assumed that the client is making a request at + /// the current block. + #[must_use] + pub const fn new() -> Self { + Self { index: None, hash: None } } } diff --git a/rosetta-types/src/peer.rs b/rosetta-types/src/peer.rs index 1a754e38..86014222 100644 --- a/rosetta-types/src/peer.rs +++ b/rosetta-types/src/peer.rs @@ -9,7 +9,6 @@ */ /// Peer : A Peer is a representation of a node's peer. - #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Peer { #[serde(rename = "peer_id")] pub peer_id: String, @@ -20,10 +19,8 @@ pub struct Peer { impl Peer { /// A Peer is a representation of a node's peer.
- pub fn new(peer_id: String) -> Peer { - Peer { - peer_id, - metadata: None, - } + #[must_use] + pub const fn new(peer_id: String) -> Self { + Self { peer_id, metadata: None } } } diff --git a/rosetta-types/src/public_key.rs b/rosetta-types/src/public_key.rs index 731d262f..538f307b 100644 --- a/rosetta-types/src/public_key.rs +++ b/rosetta-types/src/public_key.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// PublicKey : PublicKey contains a public key byte array for a particular CurveType encoded in hex. Note that there is no PrivateKey struct as this is NEVER the concern of an implementation. - +/// `PublicKey` : `PublicKey` contains a public key byte array for a particular `CurveType` encoded +/// in hex. Note that there is no `PrivateKey` struct as this is NEVER the concern of an +/// implementation. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct PublicKey { /// Hex-encoded public key bytes in the format specified by the CurveType. @@ -20,11 +21,10 @@ pub struct PublicKey { } impl PublicKey { - /// PublicKey contains a public key byte array for a particular CurveType encoded in hex. Note that there is no PrivateKey struct as this is NEVER the concern of an implementation. - pub fn new(hex_bytes: String, curve_type: crate::CurveType) -> PublicKey { - PublicKey { - hex_bytes, - curve_type, - } + /// `PublicKey` contains a public key byte array for a particular `CurveType` encoded in hex. + /// Note that there is no `PrivateKey` struct as this is NEVER the concern of an implementation. + #[must_use] + pub const fn new(hex_bytes: String, curve_type: crate::CurveType) -> Self { + Self { hex_bytes, curve_type } } } diff --git a/rosetta-types/src/related_transaction.rs b/rosetta-types/src/related_transaction.rs index 7cef6615..97a520a2 100644 --- a/rosetta-types/src/related_transaction.rs +++ b/rosetta-types/src/related_transaction.rs @@ -8,8 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// RelatedTransaction : The related_transaction allows implementations to link together multiple transactions. An unpopulated network identifier indicates that the related transaction is on the same network. - +/// `RelatedTransaction` : The `related_transaction` allows implementations to link together +/// multiple transactions. An unpopulated network identifier indicates that the related transaction +/// is on the same network. #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct RelatedTransaction { #[serde(rename = "network_identifier", skip_serializing_if = "Option::is_none")] @@ -21,15 +22,14 @@ pub struct RelatedTransaction { } impl RelatedTransaction { - /// The related_transaction allows implementations to link together multiple transactions. An unpopulated network identifier indicates that the related transaction is on the same network. - pub fn new( + /// The `related_transaction` allows implementations to link together multiple transactions. An + /// unpopulated network identifier indicates that the related transaction is on the same + /// network. 
+    #[must_use]
+    pub const fn new(
         transaction_identifier: crate::TransactionIdentifier,
         direction: crate::Direction,
-    ) -> RelatedTransaction {
-        RelatedTransaction {
-            network_identifier: None,
-            transaction_identifier,
-            direction,
-        }
+    ) -> Self {
+        Self { network_identifier: None, transaction_identifier, direction }
     }
 }
diff --git a/rosetta-types/src/search_transactions_request.rs b/rosetta-types/src/search_transactions_request.rs
index 89ae6cf3..c8ea27fe 100644
--- a/rosetta-types/src/search_transactions_request.rs
+++ b/rosetta-types/src/search_transactions_request.rs
@@ -8,27 +8,30 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SearchTransactionsRequest : SearchTransactionsRequest is used to search for transactions matching a set of provided conditions in canonical blocks.
-
+/// `SearchTransactionsRequest` : `SearchTransactionsRequest` is used to search for transactions
+/// matching a set of provided conditions in canonical blocks.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SearchTransactionsRequest {
     #[serde(rename = "network_identifier")]
     pub network_identifier: crate::NetworkIdentifier,
     #[serde(rename = "operator", skip_serializing_if = "Option::is_none")]
     pub operator: Option,
-    /// max_block is the largest block index to consider when searching for transactions. If this field is not populated, the current block is considered the max_block. If you do not specify a max_block, it is possible a newly synced block will interfere with paginated transaction queries (as the offset could become invalid with newly added rows).
+    /// max_block is the largest block index to consider when searching for transactions. If this
+    /// field is not populated, the current block is considered the max_block. If you do not
+    /// specify a max_block, it is possible a newly synced block will interfere with paginated
+    /// transaction queries (as the offset could become invalid with newly added rows).
     #[serde(rename = "max_block", skip_serializing_if = "Option::is_none")]
     pub max_block: Option,
-    /// offset is the offset into the query result to start returning transactions. If any search conditions are changed, the query offset will change and you must restart your search iteration.
+    /// offset is the offset into the query result to start returning transactions. If any search
+    /// conditions are changed, the query offset will change and you must restart your search
+    /// iteration.
     #[serde(rename = "offset", skip_serializing_if = "Option::is_none")]
     pub offset: Option,
-    /// limit is the maximum number of transactions to return in one call. The implementation may return <= limit transactions.
+    /// limit is the maximum number of transactions to return in one call. The implementation may
+    /// return <= limit transactions.
     #[serde(rename = "limit", skip_serializing_if = "Option::is_none")]
     pub limit: Option,
-    #[serde(
-        rename = "transaction_identifier",
-        skip_serializing_if = "Option::is_none"
-    )]
+    #[serde(rename = "transaction_identifier", skip_serializing_if = "Option::is_none")]
     pub transaction_identifier: Option,
     #[serde(rename = "account_identifier", skip_serializing_if = "Option::is_none")]
     pub account_identifier: Option,
@@ -42,18 +45,22 @@ pub struct SearchTransactionsRequest {
     /// type is the network-specific operation type.
     #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
     pub r#type: Option,
-    /// address is AccountIdentifier.Address. This is used to get all transactions related to an AccountIdentifier.Address, regardless of SubAccountIdentifier.
+    /// address is AccountIdentifier.Address. This is used to get all transactions related to an
+    /// AccountIdentifier.Address, regardless of SubAccountIdentifier.
     #[serde(rename = "address", skip_serializing_if = "Option::is_none")]
     pub address: Option,
-    /// success is a synthetic condition populated by parsing network-specific operation statuses (using the mapping provided in `/network/options`).
+    /// success is a synthetic condition populated by parsing network-specific operation statuses
+    /// (using the mapping provided in `/network/options`).
     #[serde(rename = "success", skip_serializing_if = "Option::is_none")]
     pub success: Option,
 }
 impl SearchTransactionsRequest {
-    /// SearchTransactionsRequest is used to search for transactions matching a set of provided conditions in canonical blocks.
-    pub fn new(network_identifier: crate::NetworkIdentifier) -> SearchTransactionsRequest {
-        SearchTransactionsRequest {
+    /// `SearchTransactionsRequest` is used to search for transactions matching a set of provided
+    /// conditions in canonical blocks.
+    #[must_use]
+    pub const fn new(network_identifier: crate::NetworkIdentifier) -> Self {
+        Self {
             network_identifier,
             operator: None,
             max_block: None,
diff --git a/rosetta-types/src/search_transactions_response.rs b/rosetta-types/src/search_transactions_response.rs
index a25b448f..5db368cc 100644
--- a/rosetta-types/src/search_transactions_response.rs
+++ b/rosetta-types/src/search_transactions_response.rs
@@ -8,31 +8,34 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SearchTransactionsResponse : SearchTransactionsResponse contains an ordered collection of BlockTransactions that match the query in SearchTransactionsRequest. These BlockTransactions are sorted from most recent block to oldest block.
-
+/// `SearchTransactionsResponse` : `SearchTransactionsResponse` contains an ordered collection of
+/// `BlockTransactions` that match the query in `SearchTransactionsRequest`. These
+/// `BlockTransactions` are sorted from most recent block to oldest block.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SearchTransactionsResponse {
-    /// transactions is an array of BlockTransactions sorted by most recent BlockIdentifier (meaning that transactions in recent blocks appear first). If there are many transactions for a particular search, transactions may not contain all matching transactions. It is up to the caller to paginate these transactions using the max_block field.
+    /// transactions is an array of BlockTransactions sorted by most recent BlockIdentifier
+    /// (meaning that transactions in recent blocks appear first). If there are many transactions
+    /// for a particular search, transactions may not contain all matching transactions. It is up
+    /// to the caller to paginate these transactions using the max_block field.
     #[serde(rename = "transactions")]
     pub transactions: Vec,
-    /// total_count is the number of results for a given search. Callers typically use this value to concurrently fetch results by offset or to display a virtual page number associated with results.
+    /// total_count is the number of results for a given search. Callers typically use this value
+    /// to concurrently fetch results by offset or to display a virtual page number associated with
+    /// results.
     #[serde(rename = "total_count")]
     pub total_count: i64,
-    /// next_offset is the next offset to use when paginating through transaction results. If this field is not populated, there are no more transactions to query.
+    /// next_offset is the next offset to use when paginating through transaction results. If this
+    /// field is not populated, there are no more transactions to query.
     #[serde(rename = "next_offset", skip_serializing_if = "Option::is_none")]
     pub next_offset: Option,
 }
 impl SearchTransactionsResponse {
-    /// SearchTransactionsResponse contains an ordered collection of BlockTransactions that match the query in SearchTransactionsRequest. These BlockTransactions are sorted from most recent block to oldest block.
-    pub fn new(
-        transactions: Vec,
-        total_count: i64,
-    ) -> SearchTransactionsResponse {
-        SearchTransactionsResponse {
-            transactions,
-            total_count,
-            next_offset: None,
-        }
+    /// `SearchTransactionsResponse` contains an ordered collection of `BlockTransactions` that
+    /// match the query in `SearchTransactionsRequest`. These `BlockTransactions` are sorted from
+    /// most recent block to oldest block.
+    #[must_use]
+    pub const fn new(transactions: Vec, total_count: i64) -> Self {
+        Self { transactions, total_count, next_offset: None }
     }
 }
diff --git a/rosetta-types/src/signature.rs b/rosetta-types/src/signature.rs
index 7fcc02b4..058bd998 100644
--- a/rosetta-types/src/signature.rs
+++ b/rosetta-types/src/signature.rs
@@ -8,8 +8,10 @@
  * Generated by: https://openapi-generator.tech
  */
-/// Signature : Signature contains the payload that was signed, the public keys of the keypairs used to produce the signature, the signature (encoded in hex), and the SignatureType. PublicKey is often times not known during construction of the signing payloads but may be needed to combine signatures properly.
-
+/// Signature : Signature contains the payload that was signed, the public keys of the keypairs used
+/// to produce the signature, the signature (encoded in hex), and the `SignatureType`. `PublicKey`
+/// is often times not known during construction of the signing payloads but may be needed to
+/// combine signatures properly.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct Signature {
     #[serde(rename = "signing_payload")]
@@ -23,18 +25,17 @@ pub struct Signature {
 }
 impl Signature {
-    /// Signature contains the payload that was signed, the public keys of the keypairs used to produce the signature, the signature (encoded in hex), and the SignatureType. PublicKey is often times not known during construction of the signing payloads but may be needed to combine signatures properly.
-    pub fn new(
+    /// Signature contains the payload that was signed, the public keys of the keypairs used to
+    /// produce the signature, the signature (encoded in hex), and the `SignatureType`. `PublicKey`
+    /// is often times not known during construction of the signing payloads but may be needed to
+    /// combine signatures properly.
+    #[must_use]
+    pub const fn new(
         signing_payload: crate::SigningPayload,
         public_key: crate::PublicKey,
         signature_type: crate::SignatureType,
         hex_bytes: String,
-    ) -> Signature {
-        Signature {
-            signing_payload,
-            public_key,
-            signature_type,
-            hex_bytes,
-        }
+    ) -> Self {
+        Self { signing_payload, public_key, signature_type, hex_bytes }
     }
 }
diff --git a/rosetta-types/src/signature_type.rs b/rosetta-types/src/signature_type.rs
index be1d89d7..6fd6155f 100644
--- a/rosetta-types/src/signature_type.rs
+++ b/rosetta-types/src/signature_type.rs
@@ -8,9 +8,17 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SignatureType : SignatureType is the type of a cryptographic signature. * ecdsa: `r (32-bytes) || s (32-bytes)` - `64 bytes` * ecdsa_recovery: `r (32-bytes) || s (32-bytes) || v (1-byte)` - `65 bytes` * ed25519: `R (32-byte) || s (32-bytes)` - `64 bytes` * schnorr_1: `r (32-bytes) || s (32-bytes)` - `64 bytes` (schnorr signature implemented by Zilliqa where both `r` and `s` are scalars encoded as `32-bytes` values, most significant byte first.) * schnorr_poseidon: `r (32-bytes) || s (32-bytes)` where s = Hash(1st pk || 2nd pk || r) - `64 bytes` (schnorr signature w/ Poseidon hash function implemented by O(1) Labs where both `r` and `s` are scalars encoded as `32-bytes` values, least significant byte first. https://github.com/CodaProtocol/signer-reference/blob/master/schnorr.ml )
-
-/// SignatureType is the type of a cryptographic signature. * ecdsa: `r (32-bytes) || s (32-bytes)` - `64 bytes` * ecdsa_recovery: `r (32-bytes) || s (32-bytes) || v (1-byte)` - `65 bytes` * ed25519: `R (32-byte) || s (32-bytes)` - `64 bytes` * schnorr_1: `r (32-bytes) || s (32-bytes)` - `64 bytes` (schnorr signature implemented by Zilliqa where both `r` and `s` are scalars encoded as `32-bytes` values, most significant byte first.) * schnorr_poseidon: `r (32-bytes) || s (32-bytes)` where s = Hash(1st pk || 2nd pk || r) - `64 bytes` (schnorr signature w/ Poseidon hash function implemented by O(1) Labs where both `r` and `s` are scalars encoded as `32-bytes` values, least significant byte first. https://github.com/CodaProtocol/signer-reference/blob/master/schnorr.ml )
+/// `SignatureType` is the type of a cryptographic signature.
+/// * `ecdsa`: `r (32-bytes) || s (32-bytes)` - `64 bytes`
+/// * `ecdsa_recovery`: `r (32-bytes) || s (32-bytes) || v (1-byte)` - `65 bytes`
+/// * `ed25519`: `R (32-byte) || s (32-bytes)` - `64 bytes`
+/// * `schnorr_1`: `r (32-bytes) || s (32-bytes)` - `64 bytes` (schnorr signature implemented by
+/// Zilliqa where both `r` and `s` are scalars encoded as `32-bytes` values, most significant byte
+/// first.)
+/// * `schnorr_poseidon`: `r (32-bytes) || s (32-bytes)` where s = Hash(1st pk || 2nd pk || r) - `64
+/// bytes` (schnorr signature w/ Poseidon hash function implemented by O(1) Labs where both `r` and
+/// `s` are scalars encoded as `32-bytes` values, least significant byte first.
+/// [reference](https://github.com/CodaProtocol/signer-reference/blob/master/schnorr.ml)
+/// )
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
 pub enum SignatureType {
     #[serde(rename = "ecdsa")]
@@ -41,7 +49,7 @@ impl ToString for SignatureType {
 }
 impl Default for SignatureType {
-    fn default() -> SignatureType {
+    fn default() -> Self {
         Self::Ecdsa
     }
 }
diff --git a/rosetta-types/src/signing_payload.rs b/rosetta-types/src/signing_payload.rs
index 6149d576..aece3af2 100644
--- a/rosetta-types/src/signing_payload.rs
+++ b/rosetta-types/src/signing_payload.rs
@@ -8,11 +8,14 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SigningPayload : SigningPayload is signed by the client with the keypair associated with an AccountIdentifier using the specified SignatureType. SignatureType can be optionally populated if there is a restriction on the signature scheme that can be used to sign the payload.
-
+/// `SigningPayload` : `SigningPayload` is signed by the client with the keypair associated with an
+/// `AccountIdentifier` using the specified `SignatureType`. `SignatureType` can be optionally
+/// populated if there is a restriction on the signature scheme that can be used to sign the
+/// payload.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SigningPayload {
-    /// [DEPRECATED by `account_identifier` in `v1.4.4`] The network-specific address of the account that should sign the payload.
+    /// [DEPRECATED by `account_identifier` in `v1.4.4`] The network-specific address of the
+    /// account that should sign the payload.
     #[serde(rename = "address", skip_serializing_if = "Option::is_none")]
     pub address: Option,
     #[serde(rename = "account_identifier", skip_serializing_if = "Option::is_none")]
@@ -25,13 +28,12 @@ pub struct SigningPayload {
 }
 impl SigningPayload {
-    /// SigningPayload is signed by the client with the keypair associated with an AccountIdentifier using the specified SignatureType. SignatureType can be optionally populated if there is a restriction on the signature scheme that can be used to sign the payload.
-    pub fn new(hex_bytes: String) -> SigningPayload {
-        SigningPayload {
-            address: None,
-            account_identifier: None,
-            hex_bytes,
-            signature_type: None,
-        }
+    /// `SigningPayload` is signed by the client with the keypair associated with an
+    /// `AccountIdentifier` using the specified `SignatureType`. `SignatureType` can be optionally
+    /// populated if there is a restriction on the signature scheme that can be used to sign the
+    /// payload.
+    #[must_use]
+    pub const fn new(hex_bytes: String) -> Self {
+        Self { address: None, account_identifier: None, hex_bytes, signature_type: None }
     }
 }
diff --git a/rosetta-types/src/sub_account_identifier.rs b/rosetta-types/src/sub_account_identifier.rs
index 5bc09edf..c2c1c20c 100644
--- a/rosetta-types/src/sub_account_identifier.rs
+++ b/rosetta-types/src/sub_account_identifier.rs
@@ -8,24 +8,28 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SubAccountIdentifier : An account may have state specific to a contract address (ERC-20 token) and/or a stake (delegated balance). The sub_account_identifier should specify which state (if applicable) an account instantiation refers to.
-
+/// `SubAccountIdentifier` : An account may have state specific to a contract address (ERC-20 token)
+/// and/or a stake (delegated balance). The `sub_account_identifier` should specify which state (if
+/// applicable) an account instantiation refers to.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SubAccountIdentifier {
-    /// The SubAccount address may be a cryptographic value or some other identifier (ex: bonded) that uniquely specifies a SubAccount.
+    /// The SubAccount address may be a cryptographic value or some other identifier (ex: bonded)
+    /// that uniquely specifies a SubAccount.
     #[serde(rename = "address")]
     pub address: String,
-    /// If the SubAccount address is not sufficient to uniquely specify a SubAccount, any other identifying information can be stored here. It is important to note that two SubAccounts with identical addresses but differing metadata will not be considered equal by clients.
+    /// If the SubAccount address is not sufficient to uniquely specify a SubAccount, any other
+    /// identifying information can be stored here. It is important to note that two SubAccounts
+    /// with identical addresses but differing metadata will not be considered equal by clients.
     #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
     pub metadata: Option,
 }
 impl SubAccountIdentifier {
-    /// An account may have state specific to a contract address (ERC-20 token) and/or a stake (delegated balance). The sub_account_identifier should specify which state (if applicable) an account instantiation refers to.
-    pub fn new(address: String) -> SubAccountIdentifier {
-        SubAccountIdentifier {
-            address,
-            metadata: None,
-        }
+    /// An account may have state specific to a contract address (ERC-20 token) and/or a stake
+    /// (delegated balance). The `sub_account_identifier` should specify which state (if applicable)
+    /// an account instantiation refers to.
+    #[must_use]
+    pub const fn new(address: String) -> Self {
+        Self { address, metadata: None }
     }
 }
diff --git a/rosetta-types/src/sub_network_identifier.rs b/rosetta-types/src/sub_network_identifier.rs
index 94f332bd..1ca9de7f 100644
--- a/rosetta-types/src/sub_network_identifier.rs
+++ b/rosetta-types/src/sub_network_identifier.rs
@@ -8,8 +8,9 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SubNetworkIdentifier : In blockchains with sharded state, the SubNetworkIdentifier is required to query some object on a specific shard. This identifier is optional for all non-sharded blockchains.
-
+/// `SubNetworkIdentifier` : In blockchains with sharded state, the `SubNetworkIdentifier` is
+/// required to query some object on a specific shard. This identifier is optional for all
+/// non-sharded blockchains.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SubNetworkIdentifier {
     #[serde(rename = "network")]
@@ -19,11 +20,10 @@ pub struct SubNetworkIdentifier {
 }
 impl SubNetworkIdentifier {
-    /// In blockchains with sharded state, the SubNetworkIdentifier is required to query some object on a specific shard. This identifier is optional for all non-sharded blockchains.
-    pub fn new(network: String) -> SubNetworkIdentifier {
-        SubNetworkIdentifier {
-            network,
-            metadata: None,
-        }
+    /// In blockchains with sharded state, the `SubNetworkIdentifier` is required to query some
+    /// object on a specific shard. This identifier is optional for all non-sharded blockchains.
+    #[must_use]
+    pub const fn new(network: String) -> Self {
+        Self { network, metadata: None }
     }
 }
diff --git a/rosetta-types/src/sync_status.rs b/rosetta-types/src/sync_status.rs
index 0a58a09a..ce2ba0af 100644
--- a/rosetta-types/src/sync_status.rs
+++ b/rosetta-types/src/sync_status.rs
@@ -8,32 +8,44 @@
  * Generated by: https://openapi-generator.tech
  */
-/// SyncStatus : SyncStatus is used to provide additional context about an implementation's sync status. This object is often used by implementations to indicate healthiness when block data cannot be queried until some sync phase completes or cannot be determined by comparing the timestamp of the most recent block with the current time.
-
+/// `SyncStatus` : `SyncStatus` is used to provide additional context about an implementation's sync
+/// status. This object is often used by implementations to indicate healthiness when block data
+/// cannot be queried until some sync phase completes or cannot be determined by comparing the
+/// timestamp of the most recent block with the current time.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct SyncStatus {
-    /// CurrentIndex is the index of the last synced block in the current stage. This is a separate field from current_block_identifier in NetworkStatusResponse because blocks with indices up to and including the current_index may not yet be queryable by the caller. To reiterate, all indices up to and including current_block_identifier in NetworkStatusResponse must be queryable via the /block endpoint (excluding indices less than oldest_block_identifier).
+    /// CurrentIndex is the index of the last synced block in the current stage. This is a
+    /// separate field from current_block_identifier in NetworkStatusResponse because blocks with
+    /// indices up to and including the current_index may not yet be queryable by the caller. To
+    /// reiterate, all indices up to and including current_block_identifier in
+    /// NetworkStatusResponse must be queryable via the /block endpoint (excluding indices less
+    /// than oldest_block_identifier).
     #[serde(rename = "current_index", skip_serializing_if = "Option::is_none")]
     pub current_index: Option,
-    /// TargetIndex is the index of the block that the implementation is attempting to sync to in the current stage.
+    /// TargetIndex is the index of the block that the implementation is attempting to sync to in
+    /// the current stage.
     #[serde(rename = "target_index", skip_serializing_if = "Option::is_none")]
     pub target_index: Option,
     /// Stage is the phase of the sync process.
     #[serde(rename = "stage", skip_serializing_if = "Option::is_none")]
     pub stage: Option,
-    /// synced is a boolean that indicates if an implementation has synced up to the most recent block. If this field is not populated, the caller should rely on a traditional tip timestamp comparison to determine if an implementation is synced. This field is particularly useful for quiescent blockchains (blocks only produced when there are pending transactions). In these blockchains, the most recent block could have a timestamp far behind the current time but the node could be healthy and at tip.
+    /// synced is a boolean that indicates if an implementation has synced up to the most recent
+    /// block. If this field is not populated, the caller should rely on a traditional tip
+    /// timestamp comparison to determine if an implementation is synced. This field is
+    /// particularly useful for quiescent blockchains (blocks only produced when there are pending
+    /// transactions). In these blockchains, the most recent block could have a timestamp far
+    /// behind the current time but the node could be healthy and at tip.
     #[serde(rename = "synced", skip_serializing_if = "Option::is_none")]
     pub synced: Option,
 }
 impl SyncStatus {
-    /// SyncStatus is used to provide additional context about an implementation's sync status. This object is often used by implementations to indicate healthiness when block data cannot be queried until some sync phase completes or cannot be determined by comparing the timestamp of the most recent block with the current time.
-    pub fn new() -> SyncStatus {
-        SyncStatus {
-            current_index: None,
-            target_index: None,
-            stage: None,
-            synced: None,
-        }
+    /// `SyncStatus` is used to provide additional context about an implementation's sync status.
+    /// This object is often used by implementations to indicate healthiness when block data cannot
+    /// be queried until some sync phase completes or cannot be determined by comparing the
+    /// timestamp of the most recent block with the current time.
+    #[must_use]
+    pub const fn new() -> Self {
+        Self { current_index: None, target_index: None, stage: None, synced: None }
     }
 }
diff --git a/rosetta-types/src/transaction.rs b/rosetta-types/src/transaction.rs
index ffe7dced..5740657d 100644
--- a/rosetta-types/src/transaction.rs
+++ b/rosetta-types/src/transaction.rs
@@ -8,35 +8,30 @@
  * Generated by: https://openapi-generator.tech
  */
-/// Transaction : Transactions contain an array of Operations that are attributable to the same TransactionIdentifier.
-
+/// `Transaction` contains an array of Operations that are attributable to the same
+/// `TransactionIdentifier`.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct Transaction {
     #[serde(rename = "transaction_identifier")]
     pub transaction_identifier: crate::TransactionIdentifier,
     #[serde(rename = "operations")]
     pub operations: Vec,
-    #[serde(
-        rename = "related_transactions",
-        skip_serializing_if = "Option::is_none"
-    )]
+    #[serde(rename = "related_transactions", skip_serializing_if = "Option::is_none")]
     pub related_transactions: Option>,
-    /// Transactions that are related to other transactions (like a cross-shard transaction) should include the tranaction_identifier of these transactions in the metadata.
+    /// Transactions that are related to other transactions (like a cross-shard transaction) should
+    /// include the transaction_identifier of these transactions in the metadata.
     #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
     pub metadata: Option,
 }
 impl Transaction {
-    /// Transactions contain an array of Operations that are attributable to the same TransactionIdentifier.
-    pub fn new(
+    /// `Transaction` contains an array of Operations that are attributable to the same
+    /// `TransactionIdentifier`.
+    #[must_use]
+    pub const fn new(
         transaction_identifier: crate::TransactionIdentifier,
         operations: Vec,
-    ) -> Transaction {
-        Transaction {
-            transaction_identifier,
-            operations,
-            related_transactions: None,
-            metadata: None,
-        }
+    ) -> Self {
+        Self { transaction_identifier, operations, related_transactions: None, metadata: None }
     }
 }
diff --git a/rosetta-types/src/transaction_identifier.rs b/rosetta-types/src/transaction_identifier.rs
index 3a939bcd..96394987 100644
--- a/rosetta-types/src/transaction_identifier.rs
+++ b/rosetta-types/src/transaction_identifier.rs
@@ -8,18 +8,22 @@
  * Generated by: https://openapi-generator.tech
  */
-/// TransactionIdentifier : The transaction_identifier uniquely identifies a transaction in a particular network and block or in the mempool.
-
+/// `TransactionIdentifier` : The `transaction_identifier` uniquely identifies a transaction in a
+/// particular network and block or in the mempool.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct TransactionIdentifier {
-    /// Any transactions that are attributable only to a block (ex: a block event) should use the hash of the block as the identifier. This should be normalized according to the case specified in the transaction_hash_case in network options.
+    /// Any transactions that are attributable only to a block (ex: a block event) should use the
+    /// hash of the block as the identifier. This should be normalized according to the case
+    /// specified in the transaction_hash_case in network options.
     #[serde(rename = "hash")]
     pub hash: String,
 }
 impl TransactionIdentifier {
-    /// The transaction_identifier uniquely identifies a transaction in a particular network and block or in the mempool.
-    pub fn new(hash: String) -> TransactionIdentifier {
-        TransactionIdentifier { hash }
+    /// The `transaction_identifier` uniquely identifies a transaction in a particular network and
+    /// block or in the mempool.
+    #[must_use]
+    pub const fn new(hash: String) -> Self {
+        Self { hash }
     }
 }
diff --git a/rosetta-types/src/transaction_identifier_response.rs b/rosetta-types/src/transaction_identifier_response.rs
index 4289f925..6e08c987 100644
--- a/rosetta-types/src/transaction_identifier_response.rs
+++ b/rosetta-types/src/transaction_identifier_response.rs
@@ -8,8 +8,9 @@
  * Generated by: https://openapi-generator.tech
  */
-/// TransactionIdentifierResponse : TransactionIdentifierResponse contains the transaction_identifier of a transaction that was submitted to either `/construction/hash` or `/construction/submit`.
-
+/// `TransactionIdentifierResponse` : `TransactionIdentifierResponse` contains the
+/// `transaction_identifier` of a transaction that was submitted to either `/construction/hash` or
+/// `/construction/submit`.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct TransactionIdentifierResponse {
     #[serde(rename = "transaction_identifier")]
@@ -19,13 +20,10 @@ pub struct TransactionIdentifierResponse {
 }
 impl TransactionIdentifierResponse {
-    /// TransactionIdentifierResponse contains the transaction_identifier of a transaction that was submitted to either `/construction/hash` or `/construction/submit`.
-    pub fn new(
-        transaction_identifier: crate::TransactionIdentifier,
-    ) -> TransactionIdentifierResponse {
-        TransactionIdentifierResponse {
-            transaction_identifier,
-            metadata: None,
-        }
+    /// `TransactionIdentifierResponse` contains the `transaction_identifier` of a transaction that
+    /// was submitted to either `/construction/hash` or `/construction/submit`.
+    #[must_use]
+    pub const fn new(transaction_identifier: crate::TransactionIdentifier) -> Self {
+        Self { transaction_identifier, metadata: None }
     }
 }
diff --git a/rosetta-types/src/version.rs b/rosetta-types/src/version.rs
index f157f12c..6a52b44d 100644
--- a/rosetta-types/src/version.rs
+++ b/rosetta-types/src/version.rs
@@ -8,32 +8,33 @@
  * Generated by: https://openapi-generator.tech
  */
-/// Version : The Version object is utilized to inform the client of the versions of different components of the Rosetta implementation.
-
+/// Version : The Version object is utilized to inform the client of the versions of different
+/// components of the Rosetta implementation.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 pub struct Version {
-    /// The rosetta_version is the version of the Rosetta interface the implementation adheres to. This can be useful for clients looking to reliably parse responses.
+    /// The rosetta_version is the version of the Rosetta interface the implementation adheres to.
+    /// This can be useful for clients looking to reliably parse responses.
     #[serde(rename = "rosetta_version")]
     pub rosetta_version: String,
-    /// The node_version is the canonical version of the node runtime. This can help clients manage deployments.
+    /// The node_version is the canonical version of the node runtime. This can help clients manage
+    /// deployments.
     #[serde(rename = "node_version")]
    pub node_version: String,
-    /// When a middleware server is used to adhere to the Rosetta interface, it should return its version here. This can help clients manage deployments.
+    /// When a middleware server is used to adhere to the Rosetta interface, it should return its
+    /// version here. This can help clients manage deployments.
     #[serde(rename = "middleware_version", skip_serializing_if = "Option::is_none")]
     pub middleware_version: Option,
-    /// Any other information that may be useful about versioning of dependent services should be returned here.
+    /// Any other information that may be useful about versioning of dependent services should be
+    /// returned here.
     #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
     pub metadata: Option,
 }
 impl Version {
-    /// The Version object is utilized to inform the client of the versions of different components of the Rosetta implementation.
-    pub fn new(rosetta_version: String, node_version: String) -> Version {
-        Version {
-            rosetta_version,
-            node_version,
-            middleware_version: None,
-            metadata: None,
-        }
+    /// The Version object is utilized to inform the client of the versions of different components
+    /// of the Rosetta implementation.
+    #[must_use]
+    pub const fn new(rosetta_version: String, node_version: String) -> Self {
+        Self { rosetta_version, node_version, middleware_version: None, metadata: None }
    }
 }
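A minimal usage sketch of the refactored constructors above, for reviewers: it assumes the crate root re-exports these types (as the `crate::...` paths in the generated files suggest) and that `CurveType` exposes a `Secp256k1` variant as in the Rosetta spec; the hex strings are placeholders.

// Hypothetical example, not part of the diff: the generated constructors are now `const`
// and `#[must_use]`, so discarding the returned value triggers a lint warning.
use rosetta_types::{CurveType, PublicKey, Signature, SignatureType, SigningPayload};

fn sign_example() -> Signature {
    // `CurveType::Secp256k1` is an assumption; substitute whichever variant the crate defines.
    let public_key = PublicKey::new("02aabb".to_string(), CurveType::Secp256k1);
    let payload = SigningPayload::new("deadbeef".to_string());
    // Argument order follows the generated signature: payload, public key, type, hex bytes.
    // `SignatureType::Ecdsa` matches the `Default` impl shown in signature_type.rs.
    Signature::new(payload, public_key, SignatureType::Ecdsa, "c0ffee".to_string())
}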