diff --git a/Cargo.lock b/Cargo.lock index 381f72370..cafe956fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1849,16 +1849,7 @@ version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" dependencies = [ - "dirs-sys 0.3.7", -] - -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys 0.4.1", + "dirs-sys", ] [[package]] @@ -1872,18 +1863,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - [[package]] name = "discv5" version = "0.4.1" @@ -2222,7 +2201,6 @@ dependencies = [ "bimap", "bytes", "c-kzg", - "clap", "const_format", "discv5", "env_logger 0.9.3", @@ -2260,7 +2238,6 @@ dependencies = [ "tree_hash", "tree_hash_derive", "ureq", - "url", "validator", "vergen", ] @@ -2279,6 +2256,7 @@ dependencies = [ "hex", "jsonrpsee", "portal-bridge", + "portalnet", "rand", "reth-ipc", "rpc", @@ -4128,12 +4106,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "overload" version = "0.1.1" @@ -6782,32 +6754,15 @@ dependencies = [ "alloy", "anyhow", "clap", - "dirs", "discv5", - "e2store", - "ethereum_ssz", "ethportal-api", - "ethportal-peertest", - "futures", - "jsonrpsee", - "lazy_static", - "parking_lot 0.11.2", - "portal-bridge", "portalnet", "prometheus_exporter", - "rand", - "regex", - "reqwest", - "reth-ipc", "rpc", - "serde_json", - "serde_yaml", - "serial_test", - "sha3 0.9.1", - "tempfile", + "rstest", + "test-log", "tokio", "tracing", - "tracing-subscriber 0.3.19", "tree_hash", "trin-beacon", "trin-history", @@ -6815,7 +6770,6 @@ dependencies = [ "trin-storage", "trin-utils", "trin-validation", - "ureq", "url", "utp-rs", ] @@ -6992,9 +6946,11 @@ dependencies = [ name = "trin-utils" version = "0.1.0" dependencies = [ + "alloy", "ansi_term", "atty", "directories", + "ethportal-api", "tempfile", "tracing", "tracing-subscriber 0.3.19", diff --git a/Cargo.toml b/Cargo.toml index 5f822b6ec..6fb6ecaa7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,62 +1,6 @@ -[package] -name = "trin" -default-run = "trin" -description = "A Rust implementation of the Ethereum Portal Network" -authors.workspace = true -categories.workspace = true -edition.workspace = true -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -rust-version.workspace = true -version.workspace = true - -[dependencies] -alloy = { workspace = true, features = ["eips", "provider-ipc", "provider-ws", "pubsub", "reqwest", "rpc-types"] } -anyhow.workspace = true -clap.workspace = true -dirs = "5.0.1" -discv5.workspace = true -e2store.workspace = true -ethereum_ssz.workspace = true -ethportal-api.workspace = true -futures.workspace = true -jsonrpsee.workspace = true -lazy_static.workspace = true -parking_lot.workspace = true -portal-bridge.workspace = true -portalnet.workspace = true -prometheus_exporter.workspace = true -rand.workspace = true -regex = "1.10.2" 
-reqwest.workspace = true -reth-ipc.workspace = true -rpc.workspace = true -serde_json = { workspace = true, features = ["preserve_order"]} -sha3.workspace = true -tempfile.workspace = true -tokio.workspace = true -tracing.workspace = true -tracing-subscriber.workspace = true -tree_hash.workspace = true -trin-beacon.workspace = true -trin-history.workspace = true -trin-state.workspace = true -trin-storage.workspace = true -trin-utils.workspace = true -trin-validation.workspace = true -url.workspace = true -utp-rs.workspace = true - -[dev-dependencies] -ethportal-peertest = { path = "ethportal-peertest" } -serde_yaml.workspace = true -serial_test.workspace = true -ureq.workspace = true - [workspace] members = [ + "bin/trin", "ethportal-api", "ethportal-peertest", "e2store", @@ -75,6 +19,9 @@ members = [ "utp-testing", ] +default-members = ["bin/trin"] +resolver = "2" + [workspace.package] authors = ["https://github.com/ethereum/trin/graphs/contributors"] categories = ["cryptography::cryptocurrencies"] @@ -97,12 +44,10 @@ clap = { version = "4.2.1", features = ["derive"] } delay_map = "0.4.0" directories = "3.0" discv5 = { version = "0.4.1", features = ["serde"] } -e2store = { path = "e2store" } env_logger = "0.9.0" eth_trie = "0.5.0" ethereum_ssz = "0.7.1" ethereum_ssz_derive = "0.7.1" -ethportal-api = { path = "ethportal-api" } futures = "0.3.23" futures-util = "0.3.23" hex = "0.4.3" @@ -111,10 +56,7 @@ itertools = "0.13.0" jsonrpsee = "0.24.4" keccak-hash = "0.10.0" lazy_static = "1.4.0" -light-client = { path = "light-client" } parking_lot = "0.11.2" -portal-bridge = { path = "portal-bridge" } -portalnet = { path = "portalnet" } prometheus_exporter = "0.8.4" quickcheck = "1.0.3" r2d2 = "0.8.9" @@ -124,7 +66,6 @@ reqwest = { version = "0.12.7", features = ["native-tls-vendored", "json"] } reth-ipc = { tag = "v1.0.8", git = "https://github.com/paradigmxyz/reth.git"} revm = { version = "14.0.3", default-features = false, features = ["std", "secp256k1", "serde-json", "c-kzg"] } revm-primitives = { version = "10.0.0", default-features = false, features = ["std", "serde"] } -rpc = { path = "rpc"} rstest = "0.18.2" rusqlite = { version = "0.31.0", features = ["bundled"] } scraper = "0.18.1" @@ -147,6 +88,19 @@ tracing-subscriber = "0.3.15" tracing-test = "0.1" tree_hash = "0.8.0" tree_hash_derive = "0.8.0" +uds_windows = "1.0.1" +ureq = { version = "2.5.0", features = ["json"] } +url = "2.3.1" +utp-rs = { tag = "v0.1.0-alpha.14", git = "https://github.com/ethereum/utp" } + +# Trin workspace crates +e2store = { path = "e2store" } +ethportal-api = { path = "ethportal-api" } +light-client = { path = "light-client" } +portal-bridge = { path = "portal-bridge" } +portalnet = { path = "portalnet" } +rpc = { path = "rpc"} +trin = { path = "bin/trin"} trin-beacon = { path = "trin-beacon" } trin-evm = { path = "trin-evm" } trin-execution = { path = "trin-execution" } @@ -156,7 +110,3 @@ trin-state = { path = "trin-state" } trin-storage = { path = "trin-storage" } trin-utils = { path = "trin-utils" } trin-validation = { path = "trin-validation" } -uds_windows = "1.0.1" -ureq = { version = "2.5.0", features = ["json"] } -url = "2.3.1" -utp-rs = { tag = "v0.1.0-alpha.14", git = "https://github.com/ethereum/utp" } diff --git a/bin/trin/Cargo.toml b/bin/trin/Cargo.toml new file mode 100644 index 000000000..2af7ae383 --- /dev/null +++ b/bin/trin/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "trin" +description = "A Rust implementation of the Ethereum Portal Network" +authors.workspace = true 
+categories.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +alloy = { workspace = true, features = ["eips", "provider-ipc", "provider-ws", "pubsub", "reqwest", "rpc-types"] } +anyhow.workspace = true +clap.workspace = true +discv5.workspace = true +ethportal-api.workspace = true +portalnet.workspace = true +prometheus_exporter.workspace = true +rpc.workspace = true +tokio.workspace = true +tracing.workspace = true +tree_hash.workspace = true +trin-beacon.workspace = true +trin-history.workspace = true +trin-state.workspace = true +trin-storage.workspace = true +trin-utils.workspace = true +trin-validation.workspace = true +utp-rs.workspace = true +url.workspace = true + +[dev-dependencies] +test-log.workspace = true +rstest.workspace = true diff --git a/ethportal-api/src/types/cli.rs b/bin/trin/src/cli.rs similarity index 82% rename from ethportal-api/src/types/cli.rs rename to bin/trin/src/cli.rs index eb095a251..2473b3f44 100644 --- a/ethportal-api/src/types/cli.rs +++ b/bin/trin/src/cli.rs @@ -7,70 +7,34 @@ use clap::{ error::{Error, ErrorKind}, Args, Parser, Subcommand, }; -use url::Url; - -use crate::{ - types::{bootnodes::Bootnodes, network::Subnetwork}, +use ethportal_api::{ + dashboard::grafana::{GrafanaAPI, DASHBOARD_TEMPLATES}, + types::{ + network::Subnetwork, + portal_wire::{NetworkSpec, MAINNET}, + }, version::FULL_VERSION, }; +use portalnet::{ + bootnodes::Bootnodes, + config::{PortalnetConfig, NODE_ADDR_CACHE_CAPACITY}, + constants::{ + DEFAULT_DISCOVERY_PORT, DEFAULT_NETWORK, DEFAULT_UTP_TRANSFER_LIMIT, + DEFAULT_WEB3_HTTP_ADDRESS, DEFAULT_WEB3_IPC_PATH, DEFAULT_WEB3_WS_PORT, + }, +}; +use rpc::config::RpcConfig; +use trin_storage::config::StorageCapacityConfig; +use trin_utils::cli::{ + check_private_key_length, network_parser, subnetwork_parser, Web3TransportType, +}; +use url::Url; -pub const DEFAULT_WEB3_IPC_PATH: &str = "/tmp/trin-jsonrpc.ipc"; -pub const DEFAULT_WEB3_HTTP_ADDRESS: &str = "http://127.0.0.1:8545/"; -pub const DEFAULT_WEB3_HTTP_PORT: u16 = 8545; -pub const DEFAULT_WEB3_WS_PORT: u16 = 8546; -pub const DEFAULT_DISCOVERY_PORT: u16 = 9009; -pub const DEFAULT_UTP_TRANSFER_LIMIT: usize = 50; const DEFAULT_SUBNETWORKS: &str = "history"; -pub const DEFAULT_NETWORK: &str = "mainnet"; pub const DEFAULT_STORAGE_CAPACITY_MB: &str = "1000"; pub const DEFAULT_WEB3_TRANSPORT: &str = "ipc"; -use super::portal_wire::{NetworkSpec, ANGELFOOD, MAINNET}; -use crate::dashboard::grafana::{GrafanaAPI, DASHBOARD_TEMPLATES}; - -#[derive(Debug, PartialEq, Clone)] -pub enum Web3TransportType { - HTTP, - IPC, -} - -impl fmt::Display for Web3TransportType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::HTTP => write!(f, "http"), - Self::IPC => write!(f, "ipc"), - } - } -} - -impl FromStr for Web3TransportType { - type Err = &'static str; - - fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - "http" => Ok(Web3TransportType::HTTP), - "ipc" => Ok(Web3TransportType::IPC), - _ => Err("Invalid web3-transport arg. Expected either 'http' or 'ipc'"), - } - } -} - const APP_NAME: &str = "trin"; - -/// The storage capacity configurtion. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum StorageCapacityConfig { - Combined { - total_mb: u32, - subnetworks: Vec<Subnetwork>, - }, - Specific { - beacon_mb: Option<u32>, - history_mb: Option<u32>, - state_mb: Option<u32>, - }, -} - #[derive(Parser, Debug, PartialEq, Clone)] #[command(name = APP_NAME, author = "https://github.com/ethereum/trin/graphs/contributors", @@ -417,45 +381,6 @@ impl TrinConfig { } } -pub fn check_private_key_length(private_key: &str) -> Result<B256, String> { - if private_key.len() == 66 { - return B256::from_str(private_key).map_err(|err| format!("HexError: {err}")); - } - Err(format!( - "Invalid private key length: {}, expected 66 (0x-prefixed 32 byte hexstring)", - private_key.len() - )) -} - -pub fn network_parser(network_string: &str) -> Result<Arc<NetworkSpec>, String> { - match network_string { - "mainnet" => Ok(MAINNET.clone()), - "angelfood" => Ok(ANGELFOOD.clone()), - _ => Err(format!( - "Not a valid network: {network_string}, must be 'angelfood' or 'mainnet'" - )), - } -} - -pub fn subnetwork_parser(subnetwork_string: &str) -> Result<Arc<Vec<Subnetwork>>, String> { - let subnetworks = subnetwork_string - .split(',') - .map(Subnetwork::from_cli_arg) - .collect::<Result<Vec<Subnetwork>, String>>()?; - - if subnetworks.is_empty() { - return Err("At least one subnetwork must be enabled".to_owned()); - } - - for subnetwork in &subnetworks { - if !subnetwork.is_active() { - return Err("{subnetwork} subnetwork has not yet been activated".to_owned()); - } - } - - Ok(Arc::new(subnetworks)) -} - fn check_trusted_block_root(trusted_root: &str) -> Result<B256, String> { if !trusted_root.starts_with("0x") { return Err("Trusted block root must be prefixed with 0x".to_owned()); @@ -537,15 +462,43 @@ pub fn create_dashboard( Ok(()) } +impl TrinConfig { + pub fn to_portalnet_config(&self, private_key: B256) -> PortalnetConfig { + PortalnetConfig { + external_addr: self.external_addr, + private_key, + listen_port: self.discovery_port, + bootnodes: self.bootnodes.to_enrs(self.network.network()), + no_stun: self.no_stun, + no_upnp: self.no_upnp, + node_addr_cache_capacity: NODE_ADDR_CACHE_CAPACITY, + disable_poke: self.disable_poke, + trusted_block_root: self.trusted_block_root, + utp_transfer_limit: self.utp_transfer_limit, + } + } +} + +impl From<&TrinConfig> for RpcConfig { + fn from(config: &TrinConfig) -> Self { + RpcConfig { + portal_subnetworks: config.portal_subnetworks.clone(), + web3_transport: config.web3_transport.clone(), + web3_ipc_path: config.web3_ipc_path.clone(), + web3_http_address: config.web3_http_address.clone(), + ws: config.ws, + ws_port: config.ws_port, + } + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr}; - use test_log::test; - use super::*; - #[test] + #[test_log::test] fn test_default_args() { let expected_config = TrinConfig::default(); let actual_config = TrinConfig::new_from(["trin"]).unwrap(); @@ -560,12 +513,12 @@ mod tests { assert_eq!(actual_config.ephemeral, expected_config.ephemeral); } - #[test] + #[test_log::test] fn test_help() { TrinConfig::new_from(["trin", "-h"]).expect_err("Should be an error to exit early"); } - #[test] + #[test_log::test] fn test_custom_http_args() { let expected_config = TrinConfig { web3_http_address: Url::parse("http://0.0.0.0:8080/").unwrap(), @@ -587,7 +540,7 @@ ); } - #[test] + #[test_log::test] fn test_ipc_protocol() { let actual_config: TrinConfig = Default::default(); let expected_config = TrinConfig { @@ -602,7 +555,7 @@ ); } - #[test] + #[test_log::test] fn test_ipc_with_custom_path() { let actual_config = TrinConfig::new_from(["trin", "--web3-ipc-path", 
"/path/test.ipc"]).unwrap(); @@ -620,7 +573,7 @@ mod tests { assert_eq!(actual_config.web3_ipc_path, expected_config.web3_ipc_path); } - #[test] + #[test_log::test] #[should_panic(expected = "Must not supply an ipc path when using http")] fn test_http_protocol_rejects_custom_web3_ipc_path() { @@ -634,13 +587,13 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "Must not supply an http address when using ipc")] fn test_ipc_protocol_rejects_custom_web3_http_address() { TrinConfig::new_from(["trin", "--web3-http-address", "http://127.0.0.1:1234/"]).unwrap(); } - #[test] + #[test_log::test] fn test_custom_discovery_port() { let expected_config = TrinConfig { discovery_port: 999, @@ -650,7 +603,7 @@ mod tests { assert_eq!(actual_config.discovery_port, expected_config.discovery_port); } - #[test] + #[test_log::test] fn test_manual_external_addr_v4() { let actual_config = TrinConfig::new_from(["trin", "--external-address", "127.0.0.1:1234"]).unwrap(); @@ -660,7 +613,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn test_manual_external_addr_v6() { let actual_config = TrinConfig::new_from(["trin", "--external-address", "[::1]:1234"]).unwrap(); @@ -670,7 +623,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn test_custom_private_key() { let expected_config = TrinConfig { private_key: Some(B256::from_slice(&[1; 32])), @@ -685,7 +638,7 @@ mod tests { assert_eq!(actual_config.private_key, expected_config.private_key); } - #[test] + #[test_log::test] fn test_ephemeral() { let expected_config = TrinConfig { ephemeral: true, @@ -695,7 +648,7 @@ mod tests { assert_eq!(actual_config.ephemeral, expected_config.ephemeral); } - #[test] + #[test_log::test] fn test_enable_metrics_with_url() { let expected_config = TrinConfig { enable_metrics_with_url: Some(SocketAddr::new( @@ -712,7 +665,7 @@ mod tests { ); } - #[test] + #[test_log::test] #[should_panic( expected = "Invalid private key length: 65, expected 66 (0x-prefixed 32 byte hexstring)" )] @@ -725,7 +678,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic( expected = "Invalid private key length: 64, expected 66 (0x-prefixed 32 byte hexstring)" )] @@ -738,7 +691,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic( expected = "Invalid trusted block root length: 64, expected 66 (0x-prefixed 32 byte hexstring)" )] @@ -751,7 +704,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "Trusted block root must be prefixed with 0x")] fn test_trusted_block_root_starts_with_0x() { TrinConfig::new_from([ @@ -762,7 +715,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] fn test_trin_with_create_dashboard() { let config = TrinConfig::try_parse_from([ "trin", @@ -789,18 +742,16 @@ mod tests { } } - #[test] + #[test_log::test] #[should_panic(expected = "Invalid web3-transport arg. 
Expected either 'http' or 'ipc'")] fn test_invalid_web3_transport_argument() { TrinConfig::new_from(["trin", "--web3-transport", "invalid"]).unwrap(); } mod storage_config { - use test_log::test; - use super::*; - #[test] + #[test_log::test] fn no_flags() { let config = TrinConfig::new_from(["trin"]).unwrap(); assert_eq!( @@ -812,7 +763,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_subnetworks() { let config = TrinConfig::new_from(["trin", "--portal-subnetworks", "history,state"]).unwrap(); @@ -825,7 +776,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_total() { let config = TrinConfig::new_from(["trin", "--storage.total", "200"]).unwrap(); assert_eq!( @@ -837,7 +788,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_total_and_subnetworks() { let config = TrinConfig::new_from([ "trin", @@ -856,7 +807,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_mb() { let config = TrinConfig::new_from(["trin", "--mb", "200"]).unwrap(); assert_eq!( @@ -868,7 +819,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_mb_and_subnetworks() { let config = TrinConfig::new_from([ "trin", @@ -887,7 +838,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_total_and_all_subnetworks() { let config = TrinConfig::new_from([ "trin", @@ -906,7 +857,7 @@ mod tests { ); } - #[test] + #[test_log::test] #[should_panic( expected = "--storage.total and --storage.beacon can't be set at the same time" )] @@ -915,7 +866,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic( expected = "--storage.total and --storage.history can't be set at the same time" )] @@ -924,7 +875,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic( expected = "--storage.total and --storage.state can't be set at the same time" )] @@ -933,7 +884,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] fn with_history() { let config = TrinConfig::new_from(["trin", "--storage.history", "200"]).unwrap(); assert_eq!( @@ -946,7 +897,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_history_and_state() { let config = TrinConfig::new_from([ "trin", @@ -968,7 +919,7 @@ mod tests { ); } - #[test] + #[test_log::test] fn with_history_and_state_and_beacon() { let config = TrinConfig::new_from([ "trin", @@ -992,14 +943,14 @@ mod tests { ); } - #[test] + #[test_log::test] #[should_panic(expected = "--storage.state is set but State subnetwork is not enabled")] fn with_history_and_state_without_subnetworks() { TrinConfig::new_from(["trin", "--storage.history", "200", "--storage.state", "300"]) .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "--storage.beacon is set but Beacon subnetwork is not enabled")] fn with_history_and_beacon_without_subnetworks() { TrinConfig::new_from([ @@ -1012,7 +963,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "--storage.history is set but History subnetwork is not enabled")] fn with_history_and_beacon_without_history_subnetwork() { TrinConfig::new_from([ @@ -1027,7 +978,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "History subnetwork enabled but --storage.history is not set")] fn specific_without_history_with_subnetwork() { TrinConfig::new_from([ @@ -1040,7 +991,7 @@ mod tests { .unwrap(); } - #[test] + #[test_log::test] #[should_panic(expected = "State subnetwork enabled but --storage.state is not set")] fn specific_without_state_with_subnetwork() { TrinConfig::new_from([ @@ -1053,7 +1004,7 @@ mod tests { .unwrap(); } - 
#[test] + #[test_log::test] #[should_panic(expected = "Beacon subnetwork enabled but --storage.beacon is not set")] fn specific_without_beacon_with_subnetwork() { TrinConfig::new_from([ @@ -1066,7 +1017,7 @@ .unwrap(); } - #[test] + #[test_log::test] fn with_total_zero() { let config = TrinConfig::new_from([ "trin", @@ -1085,7 +1036,7 @@ ); } - #[test] + #[test_log::test] fn with_zero_per_subnetwork() { let config = TrinConfig::new_from([ "trin", @@ -1109,4 +1060,86 @@ ); } } + + mod bootnodes { + use ethportal_api::{types::network::Network, Enr}; + use portalnet::bootnodes::Bootnode; + + use super::*; + + #[test_log::test] + fn test_bootnodes_default_with_default_bootnodes() { + let config = TrinConfig::new_from(["trin"]).unwrap(); + assert_eq!(config.bootnodes, Bootnodes::Default); + let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); + assert_eq!(bootnodes.len(), 11); + } + + #[test_log::test] + fn test_bootnodes_default_with_explicit_default_bootnodes() { + let config = TrinConfig::new_from(["trin", "--bootnodes", "default"]).unwrap(); + assert_eq!(config.bootnodes, Bootnodes::Default); + let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); + assert_eq!(bootnodes.len(), 11); + } + + #[test_log::test] + fn test_bootnodes_default_with_no_bootnodes() { + let config = TrinConfig::new_from(["trin", "--bootnodes", "none"]).unwrap(); + assert_eq!(config.bootnodes, Bootnodes::None); + let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); + assert_eq!(bootnodes.len(), 0); + } + + #[rstest::rstest] + #[case("invalid")] + #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,invalid")] + #[should_panic] + fn test_bootnodes_invalid_enr(#[case] bootnode: &str) { + TrinConfig::new_from(["trin", "--bootnodes", bootnode]).unwrap(); + } + + #[rstest::rstest] + #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg", 1)] + #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,enr:-IS4QPUT9hwV4YfNTxazR2ltch4qKzvX_HwxQBw8gUN3q1MDfNyaD1EHc1wQZRTUzQQD-RVYx3h4nA1Sqk0Wx9DwzNABgmlkgnY0gmlwhM69ZOyJc2VjcDI1NmsxoQLaI-m2CDIjpwcnUf1ESspvOctJLpIrLA8AZ4zbo_1bFIN1ZHCCIyg", 2)] + #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,enr:-IS4QPUT9hwV4YfNTxazR2ltch4qKzvX_HwxQBw8gUN3q1MDfNyaD1EHc1wQZRTUzQQD-RVYx3h4nA1Sqk0Wx9DwzNABgmlkgnY0gmlwhM69ZOyJc2VjcDI1NmsxoQLaI-m2CDIjpwcnUf1ESspvOctJLpIrLA8AZ4zbo_1bFIN1ZHCCIyg,enr:-IS4QB77AROcGX-TSkY-U-SaZJ5ma9ICQj6ETO3FqUdCnTZeJ0mDrdCKUqd5AQ0jrHa7m9-mOLvFFKMV_-tBD8uDYZUBgmlkgnY0gmlwhJ_fCDaJc2VjcDI1NmsxoQN9rahqamBOJfj4u6yssJQJ1-EZoyAw-7HIgp1FwNUdnoN1ZHCCIyg", 3)] + fn test_bootnodes_valid_enrs(#[case] bootnode: &str, #[case] expected_length: usize) { + use ethportal_api::types::network::Network; + + let config = TrinConfig::new_from(["trin", "--bootnodes", bootnode]).unwrap(); + match config.bootnodes.clone() { + Bootnodes::Custom(bootnodes) => { + assert_eq!(bootnodes.len(), expected_length); + } + _ => panic!("Bootnodes should be custom"), + }; + let bootnodes: Vec<Enr> = 
config.bootnodes.to_enrs(Network::Mainnet); + assert_eq!(bootnodes.len(), expected_length); + } + + #[rstest::rstest] + fn test_angelfood_network_defaults_to_correct_bootnodes() { + let config = TrinConfig::new_from(["trin", "--network", "angelfood"]).unwrap(); + assert_eq!(config.bootnodes, Bootnodes::Default); + let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Angelfood); + assert_eq!(bootnodes.len(), 1); + } + + #[rstest::rstest] + fn test_custom_bootnodes_override_angelfood_default() { + let enr = "enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg"; + let config = + TrinConfig::new_from(["trin", "--network", "angelfood", "--bootnodes", enr]) + .unwrap(); + assert_eq!( + config.bootnodes, + Bootnodes::Custom(vec![Bootnode { + enr: Enr::from_str(enr).unwrap(), + alias: "custom".to_string(), + }]) + ); + let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Angelfood); + assert_eq!(bootnodes.len(), 1); + } + } } diff --git a/src/lib.rs b/bin/trin/src/lib.rs similarity index 95% rename from src/lib.rs rename to bin/trin/src/lib.rs index 20edd3a2b..ab33dc339 100644 --- a/src/lib.rs +++ b/bin/trin/src/lib.rs @@ -1,17 +1,15 @@ #![warn(clippy::unwrap_used)] #![warn(clippy::uninlined_format_args)] +pub mod cli; + use std::sync::Arc; -#[cfg(windows)] -use ethportal_api::types::cli::Web3TransportType; +use cli::TrinConfig; use ethportal_api::{ - types::{cli::TrinConfig, network::Subnetwork}, - utils::bytes::hex_encode, - version::get_trin_version, + types::network::Subnetwork, utils::bytes::hex_encode, version::get_trin_version, }; use portalnet::{ - config::PortalnetConfig, discovery::{Discovery, Discv5UdpSocket}, events::PortalnetEvents, utils::db::{configure_node_data_dir, configure_trin_data_dir}, @@ -24,6 +22,8 @@ use trin_beacon::initialize_beacon_network; use trin_history::initialize_history_network; use trin_state::initialize_state_network; use trin_storage::PortalStorageConfigFactory; +#[cfg(windows)] +use trin_utils::cli::Web3TransportType; use trin_validation::oracle::HeaderOracle; use utp_rs::socket::UtpSocket; @@ -52,7 +52,7 @@ pub async fn run_trin( trin_config.network.network(), )?; - let portalnet_config = PortalnetConfig::new(&trin_config, private_key); + let portalnet_config = trin_config.to_portalnet_config(private_key); // Initialize base discovery protocol let mut discovery = Discovery::new(portalnet_config.clone(), trin_config.network.clone())?; @@ -155,10 +155,9 @@ pub async fn run_trin( }; // Launch JSON-RPC server - let jsonrpc_trin_config = trin_config.clone(); let jsonrpc_discovery = Arc::clone(&discovery); let rpc_handle: RpcServerHandle = launch_jsonrpc_server( - jsonrpc_trin_config, + (&trin_config).into(), jsonrpc_discovery, history_jsonrpc_tx, state_jsonrpc_tx, diff --git a/src/main.rs b/bin/trin/src/main.rs similarity index 88% rename from src/main.rs rename to bin/trin/src/main.rs index 8614f7229..75eedfe51 100644 --- a/src/main.rs +++ b/bin/trin/src/main.rs @@ -1,8 +1,7 @@ #![warn(clippy::unwrap_used)] -use ethportal_api::types::cli::TrinConfig; use tracing::error; -use trin::run_trin; +use trin::{cli::TrinConfig, run_trin}; use trin_utils::log::init_tracing_logger; #[tokio::main] diff --git a/docker/Dockerfile b/docker/Dockerfile index efb5b16ca..5f67c4345 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -24,9 +24,6 @@ WORKDIR /app # copy build artifacts from build stage COPY --from=builder 
/app/target/release/trin /usr/bin/ -COPY --from=builder /app/target/release/poll_latest /usr/bin/ -COPY --from=builder /app/target/release/purge_invalid_history_content /usr/bin/ -COPY --from=builder /app/target/release/sample_range /usr/bin/ ENV RUST_LOG=debug diff --git a/docker/Dockerfile.bridge b/docker/Dockerfile.bridge index 4b8d44ab7..1b20733ef 100644 --- a/docker/Dockerfile.bridge +++ b/docker/Dockerfile.bridge @@ -37,8 +37,6 @@ WORKDIR /app COPY --from=builder /app/target/release/trin /usr/bin/ COPY --from=builder /app/trin-execution/resources /resources COPY --from=builder /app/target/release/portal-bridge /usr/bin/ -COPY --from=builder /app/target/release/sample_range /usr/bin/ -COPY --from=builder /app/target/release/poll_latest /usr/bin/ RUN apt-get update && apt-get install libcurl4 -y diff --git a/ethportal-api/Cargo.toml b/ethportal-api/Cargo.toml index 74d2a2f5c..17c9788ec 100644 --- a/ethportal-api/Cargo.toml +++ b/ethportal-api/Cargo.toml @@ -20,7 +20,6 @@ base64 = "0.13.0" bimap = "0.6.3" bytes.workspace = true c-kzg = "1.0.0" -clap.workspace = true const_format = {version = "0.2.0", features = ["rust_1_64"]} discv5.workspace = true eth_trie.workspace = true @@ -51,7 +50,6 @@ tokio.workspace = true tree_hash.workspace = true tree_hash_derive.workspace = true ureq.workspace = true -url.workspace = true validator = { version = "0.13.0", features = ["derive"] } [dev-dependencies] diff --git a/ethportal-api/src/lib.rs b/ethportal-api/src/lib.rs index aa1340ef8..56f5e4a85 100644 --- a/ethportal-api/src/lib.rs +++ b/ethportal-api/src/lib.rs @@ -4,11 +4,10 @@ #![warn(clippy::unwrap_used)] #![warn(clippy::uninlined_format_args)] -#[macro_use] extern crate lazy_static; mod beacon; -mod dashboard; +pub mod dashboard; pub mod discv5; mod eth; mod history; diff --git a/ethportal-api/src/types/mod.rs b/ethportal-api/src/types/mod.rs index e64bafdb0..b99b126cd 100644 --- a/ethportal-api/src/types/mod.rs +++ b/ethportal-api/src/types/mod.rs @@ -1,6 +1,4 @@ -pub mod bootnodes; pub mod bytes; -pub mod cli; pub mod consensus; pub mod content_key; pub mod content_value; diff --git a/ethportal-peertest/Cargo.toml b/ethportal-peertest/Cargo.toml index 2f8bb49da..1d70138b6 100644 --- a/ethportal-peertest/Cargo.toml +++ b/ethportal-peertest/Cargo.toml @@ -21,6 +21,7 @@ ethportal-api.workspace = true futures.workspace = true hex.workspace = true jsonrpsee = { workspace = true, features = ["async-client", "client", "macros", "server"]} +portalnet.workspace = true portal-bridge.workspace = true rand.workspace = true reth-ipc.workspace = true @@ -33,7 +34,7 @@ tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true tree_hash.workspace = true -trin = { path = ".." 
} +trin.workspace = true trin-history.workspace = true trin-state.workspace = true trin-utils.workspace = true diff --git a/ethportal-peertest/src/lib.rs b/ethportal-peertest/src/lib.rs index 010456f29..c2b2e8c26 100644 --- a/ethportal-peertest/src/lib.rs +++ b/ethportal-peertest/src/lib.rs @@ -7,7 +7,6 @@ use std::{net::Ipv4Addr, path::PathBuf, thread, time}; use ethportal_api::{ types::{ - cli::{TrinConfig, DEFAULT_DISCOVERY_PORT}, enr::Enr, network::{Network, Subnetwork}, }, @@ -16,7 +15,9 @@ use ethportal_api::{ }; use futures::future; use jsonrpsee::async_client::Client; +use portalnet::constants::DEFAULT_DISCOVERY_PORT; use rpc::RpcServerHandle; +use trin::cli::TrinConfig; pub struct PeertestNode { pub enr: Enr, diff --git a/ethportal-peertest/src/scenarios/offer_accept.rs b/ethportal-peertest/src/scenarios/offer_accept.rs index f50ec7510..99952e93f 100644 --- a/ethportal-peertest/src/scenarios/offer_accept.rs +++ b/ethportal-peertest/src/scenarios/offer_accept.rs @@ -4,14 +4,12 @@ use alloy::primitives::Bytes; use e2store::era1::Era1; use ethportal_api::{ jsonrpsee::{async_client::Client, http_client::HttpClient}, - types::{ - cli::DEFAULT_UTP_TRANSFER_LIMIT, enr::Enr, execution::accumulator::EpochAccumulator, - portal_wire::OfferTrace, - }, + types::{enr::Enr, execution::accumulator::EpochAccumulator, portal_wire::OfferTrace}, utils::bytes::hex_encode, ContentValue, Discv5ApiClient, HistoryContentKey, HistoryContentValue, HistoryNetworkApiClient, }; use portal_bridge::api::execution::construct_proof; +use portalnet::constants::DEFAULT_UTP_TRANSFER_LIMIT; use ssz::Decode; use tracing::info; diff --git a/ethportal-peertest/src/scenarios/put_content.rs b/ethportal-peertest/src/scenarios/put_content.rs index e4852cad8..ccfe3ee06 100644 --- a/ethportal-peertest/src/scenarios/put_content.rs +++ b/ethportal-peertest/src/scenarios/put_content.rs @@ -1,10 +1,10 @@ use std::net::{IpAddr, Ipv4Addr}; use ethportal_api::{ - jsonrpsee::async_client::Client, types::cli::TrinConfig, ContentValue, Discv5ApiClient, - HistoryNetworkApiClient, + jsonrpsee::async_client::Client, ContentValue, Discv5ApiClient, HistoryNetworkApiClient, }; use tracing::info; +use trin::cli::TrinConfig; use crate::{ utils::{ diff --git a/portal-bridge/src/cli.rs b/portal-bridge/src/cli.rs index 713eb2c60..65578078a 100644 --- a/portal-bridge/src/cli.rs +++ b/portal-bridge/src/cli.rs @@ -3,17 +3,13 @@ use std::{env, net::SocketAddr, path::PathBuf, str::FromStr, sync::Arc}; use alloy::primitives::B256; use clap::Parser; use ethportal_api::{ - types::{ - cli::{ - check_private_key_length, network_parser, DEFAULT_DISCOVERY_PORT, DEFAULT_NETWORK, - DEFAULT_WEB3_HTTP_PORT, - }, - network::Subnetwork, - portal_wire::NetworkSpec, - }, + types::{network::Subnetwork, portal_wire::NetworkSpec}, Enr, }; -use portalnet::discovery::ENR_PORTAL_CLIENT_KEY; +use portalnet::{ + constants::{DEFAULT_DISCOVERY_PORT, DEFAULT_NETWORK, DEFAULT_WEB3_HTTP_PORT}, + discovery::ENR_PORTAL_CLIENT_KEY, +}; use reqwest::{ header::{HeaderMap, HeaderValue, CONTENT_TYPE}, Client, IntoUrl, Request, Response, @@ -22,6 +18,7 @@ use reqwest_middleware::{ClientBuilder, ClientWithMiddleware, RequestBuilder}; use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware}; use tokio::time::Duration; use tracing::error; +use trin_utils::cli::{check_private_key_length, network_parser}; use url::Url; use crate::{ diff --git a/ethportal-api/src/types/bootnodes.rs b/portalnet/src/bootnodes.rs similarity index 60% rename from 
ethportal-api/src/types/bootnodes.rs rename to portalnet/src/bootnodes.rs index c9d965e51..620710e7b 100644 --- a/ethportal-api/src/types/bootnodes.rs +++ b/portalnet/src/bootnodes.rs @@ -1,8 +1,8 @@ use std::str::FromStr; use anyhow::anyhow; - -use crate::types::{enr::Enr, network::Network}; +use ethportal_api::{types::network::Network, Enr}; +use lazy_static::lazy_static; #[derive(Clone, Debug, Eq, PartialEq)] pub struct Bootnode { @@ -139,84 +139,3 @@ impl FromStr for Bootnodes { } } } - -#[cfg(test)] -#[allow(clippy::unwrap_used)] -mod test { - use rstest::rstest; - - use super::*; - use crate::types::cli::TrinConfig; - - #[test_log::test] - fn test_bootnodes_default_with_default_bootnodes() { - let config = TrinConfig::new_from(["trin"]).unwrap(); - assert_eq!(config.bootnodes, Bootnodes::Default); - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); - assert_eq!(bootnodes.len(), 11); - } - - #[test_log::test] - fn test_bootnodes_default_with_explicit_default_bootnodes() { - let config = TrinConfig::new_from(["trin", "--bootnodes", "default"]).unwrap(); - assert_eq!(config.bootnodes, Bootnodes::Default); - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); - assert_eq!(bootnodes.len(), 11); - } - - #[test_log::test] - fn test_bootnodes_default_with_no_bootnodes() { - let config = TrinConfig::new_from(["trin", "--bootnodes", "none"]).unwrap(); - assert_eq!(config.bootnodes, Bootnodes::None); - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); - assert_eq!(bootnodes.len(), 0); - } - - #[rstest] - #[case("invalid")] - #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,invalid")] - #[should_panic] - fn test_bootnodes_invalid_enr(#[case] bootnode: &str) { - TrinConfig::new_from(["trin", "--bootnodes", bootnode]).unwrap(); - } - - #[rstest] - #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg", 1)] - #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,enr:-IS4QPUT9hwV4YfNTxazR2ltch4qKzvX_HwxQBw8gUN3q1MDfNyaD1EHc1wQZRTUzQQD-RVYx3h4nA1Sqk0Wx9DwzNABgmlkgnY0gmlwhM69ZOyJc2VjcDI1NmsxoQLaI-m2CDIjpwcnUf1ESspvOctJLpIrLA8AZ4zbo_1bFIN1ZHCCIyg", 2)] - #[case("enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg,enr:-IS4QPUT9hwV4YfNTxazR2ltch4qKzvX_HwxQBw8gUN3q1MDfNyaD1EHc1wQZRTUzQQD-RVYx3h4nA1Sqk0Wx9DwzNABgmlkgnY0gmlwhM69ZOyJc2VjcDI1NmsxoQLaI-m2CDIjpwcnUf1ESspvOctJLpIrLA8AZ4zbo_1bFIN1ZHCCIyg,enr:-IS4QB77AROcGX-TSkY-U-SaZJ5ma9ICQj6ETO3FqUdCnTZeJ0mDrdCKUqd5AQ0jrHa7m9-mOLvFFKMV_-tBD8uDYZUBgmlkgnY0gmlwhJ_fCDaJc2VjcDI1NmsxoQN9rahqamBOJfj4u6yssJQJ1-EZoyAw-7HIgp1FwNUdnoN1ZHCCIyg", 3)] - fn test_bootnodes_valid_enrs(#[case] bootnode: &str, #[case] expected_length: usize) { - let config = TrinConfig::new_from(["trin", "--bootnodes", bootnode]).unwrap(); - match config.bootnodes.clone() { - Bootnodes::Custom(bootnodes) => { - assert_eq!(bootnodes.len(), expected_length); - } - _ => panic!("Bootnodes should be custom"), - }; - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Mainnet); - assert_eq!(bootnodes.len(), 
expected_length); - } - - #[rstest] - fn test_angelfood_network_defaults_to_correct_bootnodes() { - let config = TrinConfig::new_from(["trin", "--network", "angelfood"]).unwrap(); - assert_eq!(config.bootnodes, Bootnodes::Default); - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Angelfood); - assert_eq!(bootnodes.len(), 1); - } - - #[rstest] - fn test_custom_bootnodes_override_angelfood_default() { - let enr = "enr:-IS4QBISSFfBzsBrjq61iSIxPMfp5ShBTW6KQUglzH_tj8_SJaehXdlnZI-NAkTGeoclwnTB-pU544BQA44BiDZ2rkMBgmlkgnY0gmlwhKEjVaWJc2VjcDI1NmsxoQOSGugH1jSdiE_fRK1FIBe9oLxaWH8D_7xXSnaOVBe-SYN1ZHCCIyg"; - let config = - TrinConfig::new_from(["trin", "--network", "angelfood", "--bootnodes", enr]).unwrap(); - assert_eq!( - config.bootnodes, - Bootnodes::Custom(vec![Bootnode { - enr: Enr::from_str(enr).unwrap(), - alias: "custom".to_string(), - }]) - ); - let bootnodes: Vec<Enr> = config.bootnodes.to_enrs(Network::Angelfood); - assert_eq!(bootnodes.len(), 1); - } -} diff --git a/portalnet/src/config.rs b/portalnet/src/config.rs index ac068c942..25b90f327 100644 --- a/portalnet/src/config.rs +++ b/portalnet/src/config.rs @@ -1,17 +1,14 @@ use std::net::SocketAddr; use alloy::primitives::B256; -use ethportal_api::types::{ - bootnodes::Bootnodes, - cli::{TrinConfig, DEFAULT_UTP_TRANSFER_LIMIT}, - enr::Enr, - network::Network, -}; +use ethportal_api::types::{enr::Enr, network::Network}; + +use crate::{bootnodes::Bootnodes, constants::DEFAULT_UTP_TRANSFER_LIMIT}; /// Capacity of the cache for observed `NodeAddress` values. /// Provides capacity for 32 full k-buckets. This capacity will be shared among all active portal /// subnetworks. -const NODE_ADDR_CACHE_CAPACITY: usize = discv5::kbucket::MAX_NODES_PER_BUCKET * 32; +pub const NODE_ADDR_CACHE_CAPACITY: usize = discv5::kbucket::MAX_NODES_PER_BUCKET * 32; #[derive(Clone)] pub struct PortalnetConfig { @@ -45,20 +42,3 @@ impl Default for PortalnetConfig { } } } - -impl PortalnetConfig { - pub fn new(trin_config: &TrinConfig, private_key: B256) -> Self { - Self { - external_addr: trin_config.external_addr, - private_key, - listen_port: trin_config.discovery_port, - bootnodes: trin_config.bootnodes.to_enrs(trin_config.network.network()), - no_stun: trin_config.no_stun, - no_upnp: trin_config.no_upnp, - node_addr_cache_capacity: NODE_ADDR_CACHE_CAPACITY, - disable_poke: trin_config.disable_poke, - trusted_block_root: trin_config.trusted_block_root, - utp_transfer_limit: trin_config.utp_transfer_limit, - } - } -} diff --git a/portalnet/src/constants.rs b/portalnet/src/constants.rs index 2c1363ebb..f83c5a17e 100644 --- a/portalnet/src/constants.rs +++ b/portalnet/src/constants.rs @@ -6,3 +6,11 @@ use std::time::Duration; /// all peer interactions. A single RPC request may spawn many queries. Each query will typically /// spawn many requests to peers. 
pub const DEFAULT_QUERY_TIMEOUT: Duration = Duration::from_secs(60); + +pub const DEFAULT_WEB3_IPC_PATH: &str = "/tmp/trin-jsonrpc.ipc"; +pub const DEFAULT_WEB3_HTTP_ADDRESS: &str = "http://127.0.0.1:8545/"; +pub const DEFAULT_WEB3_HTTP_PORT: u16 = 8545; +pub const DEFAULT_WEB3_WS_PORT: u16 = 8546; +pub const DEFAULT_DISCOVERY_PORT: u16 = 9009; +pub const DEFAULT_UTP_TRANSFER_LIMIT: usize = 50; +pub const DEFAULT_NETWORK: &str = "mainnet"; diff --git a/portalnet/src/lib.rs b/portalnet/src/lib.rs index 1dbf6c471..5f3e23da3 100644 --- a/portalnet/src/lib.rs +++ b/portalnet/src/lib.rs @@ -2,6 +2,7 @@ #![warn(clippy::uninlined_format_args)] pub mod accept_queue; +pub mod bootnodes; pub mod config; pub mod constants; pub mod discovery; diff --git a/portalnet/src/overlay/config.rs b/portalnet/src/overlay/config.rs index 5abd319d8..c29720805 100644 --- a/portalnet/src/overlay/config.rs +++ b/portalnet/src/overlay/config.rs @@ -3,9 +3,12 @@ use std::time::Duration; use discv5::kbucket::{Filter, MAX_NODES_PER_BUCKET}; -use ethportal_api::types::{cli::DEFAULT_UTP_TRANSFER_LIMIT, enr::Enr}; +use ethportal_api::types::enr::Enr; -use crate::{constants::DEFAULT_QUERY_TIMEOUT, types::node::Node}; +use crate::{ + constants::{DEFAULT_QUERY_TIMEOUT, DEFAULT_UTP_TRANSFER_LIMIT}, + types::node::Node, +}; /// Configuration parameters for the overlay network. #[derive(Clone)] diff --git a/portalnet/src/overlay/protocol.rs b/portalnet/src/overlay/protocol.rs index 42887282b..03d49085e 100644 --- a/portalnet/src/overlay/protocol.rs +++ b/portalnet/src/overlay/protocol.rs @@ -16,7 +16,6 @@ use discv5::{ }; use ethportal_api::{ types::{ - bootnodes::Bootnode, discv5::RoutingTableInfo, distance::{Distance, Metric}, enr::Enr, @@ -41,6 +40,7 @@ use trin_validation::validator::{ValidationResult, Validator}; use utp_rs::socket::UtpSocket; use crate::{ + bootnodes::Bootnode, discovery::{Discovery, UtpEnr}, events::EventEnvelope, find::query_info::{FindContentResult, RecursiveFindContentResult}, diff --git a/portalnet/src/overlay/service.rs b/portalnet/src/overlay/service.rs index 482d7e6f3..24190ae17 100644 --- a/portalnet/src/overlay/service.rs +++ b/portalnet/src/overlay/service.rs @@ -2611,11 +2611,8 @@ mod tests { use alloy::primitives::U256; use discv5::kbucket; use ethportal_api::types::{ - cli::{DEFAULT_DISCOVERY_PORT, DEFAULT_UTP_TRANSFER_LIMIT}, - content_key::overlay::IdentityContentKey, - distance::XorMetric, - enr::generate_random_remote_enr, - portal_wire::MAINNET, + content_key::overlay::IdentityContentKey, distance::XorMetric, + enr::generate_random_remote_enr, portal_wire::MAINNET, }; use kbucket::KBucketsTable; use rstest::*; @@ -2632,6 +2629,7 @@ mod tests { use super::*; use crate::{ config::PortalnetConfig, + constants::{DEFAULT_DISCOVERY_PORT, DEFAULT_UTP_TRANSFER_LIMIT}, discovery::{Discovery, NodeAddress}, overlay::config::OverlayConfig, }; diff --git a/portalnet/src/types/kbucket.rs b/portalnet/src/types/kbucket.rs index 370e2db59..66f5481a7 100644 --- a/portalnet/src/types/kbucket.rs +++ b/portalnet/src/types/kbucket.rs @@ -367,10 +367,7 @@ mod tests { enr::CombinedKey, kbucket::{FailureReason, MAX_NODES_PER_BUCKET}, }; - use ethportal_api::{ - generate_random_remote_enr, - types::{cli::DEFAULT_DISCOVERY_PORT, distance::XorMetric}, - }; + use ethportal_api::{generate_random_remote_enr, types::distance::XorMetric}; use itertools::chain; use super::*; @@ -538,6 +535,7 @@ mod tests { mod insert_or_update_discovered_nodes { use super::*; + use crate::constants::DEFAULT_DISCOVERY_PORT; #[test] fn 
simple_insert_and_update() { diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 444cf6353..6a9c4b8a6 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -12,7 +12,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -alloy = { workspace = true, features = ["rpc-types-eth"] } +alloy = { workspace = true, features = ["rpc-types-eth", "provider-ipc", "provider-ws", "pubsub", "reqwest"] } discv5.workspace = true eth_trie.workspace = true ethportal-api.workspace = true diff --git a/rpc/src/config.rs b/rpc/src/config.rs new file mode 100644 index 000000000..7a1c1c76d --- /dev/null +++ b/rpc/src/config.rs @@ -0,0 +1,16 @@ +use std::{path::PathBuf, sync::Arc}; + +use alloy::transports::http::reqwest::Url; +use ethportal_api::types::network::Subnetwork; +use trin_utils::cli::Web3TransportType; + +/// Configuration for the RPC server. +#[derive(Clone)] +pub struct RpcConfig { + pub portal_subnetworks: Arc<Vec<Subnetwork>>, + pub web3_transport: Web3TransportType, + pub web3_ipc_path: PathBuf, + pub web3_http_address: Url, + pub ws: bool, + pub ws_port: u16, +} diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 8bd87892a..96c678387 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -3,6 +3,7 @@ mod beacon_rpc; mod builder; +pub mod config; mod cors; mod discv5_rpc; mod errors; @@ -22,13 +23,13 @@ use std::{ use beacon_rpc::BeaconNetworkApi; pub use builder::{PortalRpcModule, RpcModuleBuilder, TransportRpcModuleConfig}; +use config::RpcConfig; use discv5_rpc::Discv5Api; use errors::RpcError; use eth_rpc::EthApi; use ethportal_api::{ jsonrpsee, types::{ - cli::{TrinConfig, Web3TransportType}, jsonrpc::request::{BeaconJsonRpcRequest, HistoryJsonRpcRequest, StateJsonRpcRequest}, network::Subnetwork, }, @@ -38,13 +39,14 @@ use portalnet::discovery::Discovery; use reth_ipc::server::Builder as IpcServerBuilder; use state_rpc::StateNetworkApi; use tokio::sync::mpsc; +use trin_utils::cli::Web3TransportType; use web3_rpc::Web3Api; pub use crate::rpc_server::RpcServerHandle; use crate::{jsonrpsee::server::ServerBuilder, rpc_server::RpcServerConfig}; pub async fn launch_jsonrpc_server( - trin_config: TrinConfig, + rpc_config: RpcConfig, discv5: Arc<Discovery>, history_handler: Option<mpsc::UnboundedSender<HistoryJsonRpcRequest>>, state_handler: Option<mpsc::UnboundedSender<StateJsonRpcRequest>>, @@ -53,7 +55,7 @@ // Discv5 and Web3 modules are enabled with every network let mut modules = vec![PortalRpcModule::Discv5, PortalRpcModule::Web3]; - for network in trin_config.portal_subnetworks.iter() { + for network in rpc_config.portal_subnetworks.iter() { match network { Subnetwork::History => { modules.push(PortalRpcModule::History); @@ -65,7 +67,7 @@ } } - let handle: RpcServerHandle = match trin_config.web3_transport { + let handle: RpcServerHandle = match rpc_config.web3_transport { Web3TransportType::IPC => { let transport = TransportRpcModuleConfig::default().with_ipc(modules); let transport_modules = RpcModuleBuilder::new(discv5) @@ -76,7 +78,7 @@ RpcServerConfig::default() .with_ipc_endpoint( - trin_config + rpc_config .web3_ipc_path .to_str() .expect("Path should be string"), @@ -86,7 +88,7 @@ .await? 
} Web3TransportType::HTTP => { - let transport = match trin_config.ws { + let transport = match rpc_config.ws { true => TransportRpcModuleConfig::default().with_ws(modules.clone()), false => TransportRpcModuleConfig::default(), }; @@ -100,17 +102,17 @@ let rpc_server_config = RpcServerConfig::default() .with_http_address( - trin_config + rpc_config .web3_http_address .socket_addrs(|| None) .expect("Invalid socket address")[0], ) .with_http(ServerBuilder::default()); - let rpc_server_config = match trin_config.ws { + let rpc_server_config = match rpc_config.ws { true => rpc_server_config .with_ws_address(SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::UNSPECIFIED, - trin_config.ws_port, + rpc_config.ws_port, ))) .with_ws(ServerBuilder::default()), false => rpc_server_config, diff --git a/rpc/src/rpc_server.rs b/rpc/src/rpc_server.rs index 88d28e6ef..a8bc90fc3 100644 --- a/rpc/src/rpc_server.rs +++ b/rpc/src/rpc_server.rs @@ -3,10 +3,8 @@ use std::{ net::{Ipv4Addr, SocketAddr, SocketAddrV4}, }; -use ethportal_api::{ - jsonrpsee::server::IdProvider, - types::cli::{DEFAULT_WEB3_HTTP_PORT, DEFAULT_WEB3_IPC_PATH, DEFAULT_WEB3_WS_PORT}, -}; +use ethportal_api::jsonrpsee::server::IdProvider; +use portalnet::constants::{DEFAULT_WEB3_HTTP_PORT, DEFAULT_WEB3_IPC_PATH, DEFAULT_WEB3_WS_PORT}; use reth_ipc::server::{Builder as IpcServerBuilder, IpcServer}; use tower::layer::util::{Identity, Stack}; use tower_http::cors::CorsLayer; diff --git a/src/bin/historical_batch.rs b/src/bin/historical_batch.rs deleted file mode 100644 index 979c7ac29..000000000 --- a/src/bin/historical_batch.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::{ - fs::{File, OpenOptions}, - io, - io::{BufRead, Write}, -}; - -use anyhow::ensure; -use e2store::era::Era; -use ethportal_api::{consensus::beacon_state::HistoricalBatch, utils::bytes::hex_encode}; -use regex::Regex; -use reqwest::get; -use ssz::Encode; -use tree_hash::TreeHash; - -const _BELLATRIX_SLOT: u64 = 4700013; -const _CAPELLA_SLOT: u64 = 6209536; - -const ERA_URL: &str = "https://mainnet.era.nimbus.team/"; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let download_dir = dirs::download_dir().unwrap(); - let file_path = download_dir.join("historical_batches"); - - let body = get(ERA_URL).await?.text().await.unwrap(); - // Match era files with epoch between 00573 and 00759 - let re = - Regex::new(r#"".*((0057[3-9])|(005[8-9][0-9])|006[0-9][0-9]|(007[0-5][0-9])).*\.era""#) - .unwrap(); - - let mut file_urls = vec![]; - - for cap in re.captures_iter(&body) { - let file_name = &cap[0].strip_prefix('"').unwrap().strip_suffix('"').unwrap(); - let file_url = format!("{ERA_URL}{file_name}"); - file_urls.push(file_url); - } - - ensure!( - file_urls.len() == 187, - "Expected 187 era files, found {:?}", - file_urls.len() - ); - - for url in &file_urls { - let mut url_file = OpenOptions::new() - .read(true) - .write(true) - .create(true) - .truncate(true) - .open(file_path.join("era_urls.txt")) - .unwrap(); - - let re = Regex::new(r#"\d+"#).unwrap(); - let cap = re.captures(url).unwrap(); - let era_number = cap[0].parse::<u64>().unwrap(); - println!("Downloading Era number: {:?}", era_number); - - if url_exists_in_file(url_file.try_clone().unwrap(), url).unwrap() { - println!("Era number: {:?} already exists", era_number); - continue; - } - - let res = get(url).await?.bytes().await.unwrap(); - let beacon_state = Era::deserialize_to_beacon_state(&res).unwrap(); - let historical_batch = HistoricalBatch { - block_roots: 
beacon_state.block_roots().clone(), - state_roots: beacon_state.state_roots().clone(), - }; - - let hash = hex_encode(historical_batch.tree_hash_root().as_slice()) - .strip_prefix("0x") - .unwrap() - .to_string(); - - let first_eight_chars = hash.chars().take(8).collect::<String>(); - - // save `HistoricalBatch` to file - let mut file = File::create(file_path.join(format!( - "historical_batch-{}-{}.ssz", - era_number, first_eight_chars - ))) - .unwrap(); - file.write_all(&historical_batch.as_ssz_bytes()).unwrap(); - // append url to file_url file - url_file.write_all(format!("{}\n", url).as_bytes()).unwrap(); - } - - Ok(()) -} - -fn url_exists_in_file(file: File, url: &str) -> io::Result<bool> { - let reader = io::BufReader::new(file); - - // Iterate over the lines in the file - for line in reader.lines() { - let line = line?; - - // Check if the line contains the URL - if line.contains(url) { - return Ok(true); - } - } - - Ok(false) -} diff --git a/src/bin/poll_latest.rs b/src/bin/poll_latest.rs deleted file mode 100644 index ea963069c..000000000 --- a/src/bin/poll_latest.rs +++ /dev/null @@ -1,332 +0,0 @@ -use std::{ - str::FromStr, - sync::{Arc, Mutex}, - time::Instant, -}; - -use alloy::{ - primitives::B256, - providers::{Provider, ProviderBuilder, WsConnect}, -}; -use anyhow::{anyhow, Result}; -use clap::Parser; -use ethportal_api::{ - jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, - types::content_key::overlay::OverlayContentKey, - HistoryContentKey, HistoryNetworkApiClient, -}; -use futures::StreamExt; -use tokio::time::{sleep, Duration}; -use tracing::{debug, info, warn}; -use trin_utils::log::init_tracing_logger; -use url::Url; - -// tldr; -// to poll latest blocks -// cargo run --bin poll_latest -// to poll latest blocks with exponential backoff and give up trying -// to find a piece of data after 240 seconds: -// cargo run --bin poll_latest -- --timeout 240 -// to poll latest blocks, and retry every 3 seconds if the data is not found: -// cargo run --bin poll_latest -- --backoff linear:3 -const DEFAULT_NODE_IP: &str = "http://127.0.0.1:8545"; -#[derive(Default)] -struct Metrics { - header_by_hash: Details, - header_by_number: Details, - block_body: Details, - receipts: Details, - // the active audit count measures every individual, active, RFC lookup - active_audit_count: u32, - // the complete audit count measures the total number of blocks whose audits have completed - complete_audit_count: u32, -} - -impl Metrics { - fn display_stats(&self) { - info!( - "Audits (active/complete): {:?}/{:?} // HeaderByHash {:?}% @ {:.2?} // HeaderByNumber {:?}% @ {:.2?} // Bodies {:?}% @ {:.2?} // Receipts {:?}% @ {:.2?}", - self.active_audit_count, - self.complete_audit_count, - (self.header_by_hash.success_count * 100) / self.header_by_hash.total_count(), - self.header_by_hash.average_time, - (self.header_by_number.success_count * 100) / self.header_by_number.total_count(), - self.header_by_number.average_time, - (self.block_body.success_count * 100) / self.block_body.total_count(), - self.block_body.average_time, - (self.receipts.success_count * 100) / self.receipts.total_count(), - self.receipts.average_time); - debug!( - "HeaderByHash: {:?}/{:?} // HeaderByNumber: {:?}/{:?} // Bodies: {:?}/{:?} // Receipts: {:?}/{:?}", - self.header_by_hash.success_count, - self.header_by_hash.total_count(), - self.header_by_number.success_count, - self.header_by_number.total_count(), - self.block_body.success_count, - self.block_body.total_count(), - self.receipts.success_count, - 
self.receipts.total_count(), - ); - } -} - -pub const MAX_TIMEOUT: Duration = Duration::from_secs(240); - -#[derive(Debug, Default)] -struct Details { - success_count: u32, - failure_count: u32, - average_time: Duration, -} - -impl Details { - fn total_count(&self) -> u32 { - self.success_count + self.failure_count - } - - fn report_success(&mut self, new_time: Duration) { - self.average_time = - (self.average_time * self.success_count + new_time) / (self.success_count + 1); - self.success_count += 1; - } - - fn report_failure(&mut self) { - // don't update average time on failure - self.failure_count += 1; - } -} - -#[tokio::main] -pub async fn main() -> Result<()> { - init_tracing_logger(); - info!("Running Poll Latest script."); - let audit_config = AuditConfig::parse(); - let timeout = match audit_config.timeout { - Some(timeout) => Duration::from_secs(timeout), - None => MAX_TIMEOUT, - }; - let infura_project_id = std::env::var("TRIN_INFURA_PROJECT_ID")?; - let ws = WsConnect::new(format!("wss://mainnet.infura.io/ws/v3/{infura_project_id}")); - let provider = ProviderBuilder::new().on_ws(ws).await?; - let mut stream = provider.subscribe_blocks().await?.into_stream(); - let client = HttpClientBuilder::default().build(audit_config.node_ip)?; - let metrics = Arc::new(Mutex::new(Metrics::default())); - while let Some(block) = stream.next().await { - let block_hash = block.header.hash; - let block_number = block.header.number; - info!("Found new block {block_hash}"); - let timestamp = Instant::now(); - let metrics = metrics.clone(); - tokio::spawn(audit_block( - block_hash, - block_number, - timestamp, - timeout, - audit_config.backoff, - metrics, - client.clone(), - )); - } - Ok(()) -} - -async fn audit_block( - hash: B256, - block_number: u64, - timestamp: Instant, - timeout: Duration, - backoff: Backoff, - metrics: Arc<Mutex<Metrics>>, - client: HttpClient, -) -> Result<()> { - metrics.lock().unwrap().active_audit_count += 3; - let header_by_hash_handle = tokio::spawn(audit_content_key( - HistoryContentKey::new_block_header_by_hash(hash), - timestamp, - timeout, - backoff, - client.clone(), - )); - let header_by_number_handle = tokio::spawn(audit_content_key( - HistoryContentKey::new_block_header_by_number(block_number), - timestamp, - timeout, - backoff, - client.clone(), - )); - let block_body_handle = tokio::spawn(audit_content_key( - HistoryContentKey::new_block_body(hash), - timestamp, - timeout, - backoff, - client.clone(), - )); - let receipts_handle = tokio::spawn(audit_content_key( - HistoryContentKey::new_block_receipts(hash), - timestamp, - timeout, - backoff, - client.clone(), - )); - match header_by_hash_handle.await? { - Ok(found_time) => { - let mut metrics = metrics.lock().unwrap(); - metrics.active_audit_count -= 1; - let time_diff = found_time - timestamp; - metrics.header_by_hash.report_success(time_diff); - } - Err(_) => { - let mut metrics = metrics.lock().unwrap(); - metrics.active_audit_count -= 1; - metrics.header_by_hash.report_failure(); - } - } - match header_by_number_handle.await? { - Ok(found_time) => { - let mut metrics = metrics.lock().unwrap(); - metrics.active_audit_count -= 1; - let time_diff = found_time - timestamp; - metrics.header_by_number.report_success(time_diff); - } - Err(_) => { - let mut metrics = metrics.lock().unwrap(); - metrics.header_by_number.report_failure(); - metrics.active_audit_count -= 1; - } - } - match block_body_handle.await? 
diff --git a/src/bin/purge_invalid_history_content.rs b/src/bin/purge_invalid_history_content.rs
deleted file mode 100644
index b115e13c3..000000000
--- a/src/bin/purge_invalid_history_content.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-use alloy::primitives::B256;
-use anyhow::Result;
-use clap::Parser;
-use discv5::enr::{CombinedKey, Enr};
-use ethportal_api::types::{
-    cli::StorageCapacityConfig,
-    network::{Network, Subnetwork},
-};
-use portalnet::utils::db::{configure_node_data_dir, configure_trin_data_dir};
-use tracing::info;
-use trin_storage::{
-    versioned::{ContentType, IdIndexedV1StoreConfig},
-    PortalStorageConfigFactory,
-};
-use trin_utils::log::init_tracing_logger;
-
-/// iterates history store and removes any invalid network entries
-pub fn main() -> Result<()> {
-    init_tracing_logger();
-    let script_config = PurgeConfig::parse();
-
-    let trin_data_dir =
-        configure_trin_data_dir(None /* data_dir */, false /* ephemeral */)?;
-    let (node_data_dir, mut private_key) =
-        configure_node_data_dir(&trin_data_dir, script_config.private_key, Network::Mainnet)?;
-    let enr_key = CombinedKey::secp256k1_from_bytes(private_key.as_mut_slice())
-        .expect("Failed to create ENR key");
-    let enr = Enr::empty(&enr_key).unwrap();
-    let node_id = enr.node_id();
-    info!("Purging data for NodeID: {node_id}");
-    info!("DB Path: {node_data_dir:?}");
-
-    let config = PortalStorageConfigFactory::new(
-        StorageCapacityConfig::Combined {
-            total_mb: script_config.capacity as u32,
-            subnetworks: vec![Subnetwork::History],
-        },
-        node_id,
-        node_data_dir,
-    )
-    .unwrap()
-    .create(&Subnetwork::History)
-    .unwrap();
-    let config = IdIndexedV1StoreConfig::new(ContentType::History, Subnetwork::History, config);
-    let sql_connection_pool = config.sql_connection_pool.clone();
-    let total_count = sql_connection_pool
-        .get()
-        .unwrap()
-        .query_row(&lookup_all_query(), [], |row| row.get::<usize, u64>(0))
-        .expect("Failed to lookup history content");
-    info!("total entry count: {total_count}");
-    let lookup_result = sql_connection_pool
-        .get()
-        .unwrap()
-        .query_row(&lookup_epoch_acc_query(), [], |row| {
-            row.get::<usize, u64>(0)
-        })
-        .expect("Failed to fetch history content");
-    info!("found {} epoch accumulators", lookup_result);
-    if script_config.evict {
-        let removed_count = sql_connection_pool
-            .get()
-            .unwrap()
-            .execute(&delete_epoch_acc_query(), [])
-            .unwrap();
-        info!("removed {} invalid history content values", removed_count);
-    }
-    Ok(())
-}
-
-fn lookup_all_query() -> String {
-    r#"SELECT COUNT(*) as count FROM ii1_history"#.to_string()
-}
-
-fn lookup_epoch_acc_query() -> String {
-    r#"SELECT COUNT(*) as count FROM ii1_history WHERE hex(content_key) LIKE "03%""#.to_string()
-}
-
-fn delete_epoch_acc_query() -> String {
-    r#"DELETE FROM ii1_history WHERE hex(content_key) LIKE '03%'"#.to_string()
-}
-
-// CLI Parameter Handling
-#[derive(Parser, Debug, PartialEq)]
-#[command(
-    name = "Trin DB Purge Invalid History Content",
-    about = "Remove invalid data from Trin History Store"
-)]
-pub struct PurgeConfig {
-    #[arg(
-        long,
-        help = "(unsafe) Hex private key to generate node id for database namespace (with 0x prefix)"
-    )]
-    pub private_key: Option<B256>,
-
-    #[arg(
-        long,
-        help = "Storage capacity. Must be larger than the current capacity otherwise it will prune data!"
-    )]
-    pub capacity: usize,
-
-    #[arg(
-        long,
-        help = "Actually evict the history data from db",
-        default_value = "false"
-    )]
-    pub evict: bool,
-}
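The purge above relies on the legacy epoch-accumulator content type being identified by a leading 0x03 selector byte in its content key, which is what the `hex(content_key) LIKE '03%'` predicates match. The same check expressed in Rust (an illustrative sketch, not part of the deleted script):

```rust
/// True if a raw history content key encodes the legacy epoch-accumulator
/// type, i.e. starts with the 0x03 selector byte (what `LIKE '03%'` matches).
fn is_epoch_accumulator_key(content_key: &[u8]) -> bool {
    content_key.first() == Some(&0x03)
}

fn main() {
    assert!(is_epoch_accumulator_key(&[0x03, 0xaa]));
    assert!(!is_epoch_accumulator_key(&[0x00, 0xaa]));
}
```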
diff --git a/src/bin/sample_range.rs b/src/bin/sample_range.rs
deleted file mode 100644
index e7d8f3214..000000000
--- a/src/bin/sample_range.rs
+++ /dev/null
@@ -1,254 +0,0 @@
-use std::{
-    str::FromStr,
-    sync::{Arc, Mutex},
-};
-
-use alloy::{
-    eips::BlockNumberOrTag,
-    primitives::B256,
-    providers::{Provider, ProviderBuilder},
-};
-use anyhow::Result;
-use clap::Parser;
-use ethportal_api::{
-    jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
-    HistoryContentKey, HistoryNetworkApiClient,
-};
-use futures::StreamExt;
-use rand::seq::SliceRandom;
-use tracing::{debug, info, warn};
-use trin_utils::log::init_tracing_logger;
-use trin_validation::constants::MERGE_BLOCK_NUMBER;
-use url::Url;
-
-// tldr
-// to sample 5 blocks from the shanghai fork:
-// cargo run --bin sample_range -- --sample-size 5 --range shanghai
-// to sample 50 blocks since block #100000:
-// cargo run --bin sample_range -- --sample-size 50 --range since:100000
-// to sample 5 blocks from the latest 500 blocks:
-// cargo run --bin sample_range -- --sample-size 5 --range latest:500
-// with a custom node ip:
-// cargo run --bin sample_range -- --sample-size 1 --range range:1-2 --node-ip http://127.0.0.1:50933
-
-#[derive(Default)]
-struct Metrics {
-    header_by_hash: Details,
-    header_by_number: Details,
-    block_body: Details,
-    receipts: Details,
-}
-
-impl Metrics {
-    fn display_stats(&self) {
-        info!(
-            "HeaderByHash {:?}% // HeaderByNumber {:?}% // Bodies {:?}% // Receipts {:?}%",
-            self.header_by_hash.success_rate(),
-            self.header_by_number.success_rate(),
-            self.block_body.success_rate(),
-            self.receipts.success_rate(),
-        );
-        debug!(
-            "HeaderByHash: {:?}/{:?} // HeaderByNumber: {:?}/{:?} // Bodies: {:?}/{:?} // Receipts: {:?}/{:?}",
-            self.header_by_hash.success_count,
-            self.header_by_hash.total_count(),
-            self.header_by_number.success_count,
-            self.header_by_number.total_count(),
-            self.block_body.success_count,
-            self.block_body.total_count(),
-            self.receipts.success_count,
-            self.receipts.total_count()
-        );
-    }
-}
-
-const FUTURES_BUFFER_SIZE: usize = 8;
-const SHANGHAI_BLOCK_NUMBER: u64 = 17034870;
-const DEFAULT_NODE_IP: &str = "http://127.0.0.1:8545";
-
-#[derive(Debug, Default)]
-struct Details {
-    success_count: u32,
-    failure_count: u32,
-}
-
-impl Details {
-    fn total_count(&self) -> u32 {
-        self.success_count + self.failure_count
-    }
-
-    fn success_rate(&self) -> u32 {
-        if self.total_count() == 0 {
-            0
-        } else {
-            (self.success_count * 100) / self.total_count()
-        }
-    }
-}
-
-#[tokio::main]
-pub async fn main() -> Result<()> {
-    init_tracing_logger();
-    let audit_config = SampleConfig::parse();
-    info!("Running Sample Range Audit: {:?}", audit_config.range);
-    let infura_project_id = std::env::var("TRIN_INFURA_PROJECT_ID")?;
-    let provider = ProviderBuilder::new()
-        .on_http(format!("https://mainnet.infura.io/v3/{infura_project_id}").parse()?);
-    let client = HttpClientBuilder::default().build(audit_config.node_ip)?;
-    let latest_block: u64 = provider.get_block_number().await?;
-    let (start, end) = match audit_config.range {
-        SampleRange::Shanghai => (SHANGHAI_BLOCK_NUMBER, latest_block),
-        SampleRange::FourFours => (0, MERGE_BLOCK_NUMBER),
-        SampleRange::Since(since) => (since, latest_block),
-        SampleRange::Latest(latest) => (latest_block - latest, latest_block),
-        SampleRange::Range(start, end) => (start, end),
-    };
-    let mut blocks: Vec<u64> = (start..end).collect();
-    let sample_size = std::cmp::min(audit_config.sample_size, blocks.len());
-    info!("Sampling {sample_size} blocks from range: {start:?} - {end:?}");
-    blocks.shuffle(&mut rand::thread_rng());
-    let blocks_to_sample = blocks[0..sample_size].to_vec();
-    let metrics = Arc::new(Mutex::new(Metrics::default()));
-    let futures = futures::stream::iter(blocks_to_sample.into_iter().map(|block_number| {
-        let client = client.clone();
-        let metrics = metrics.clone();
-        let provider = provider.clone();
-        async move {
-            let block_hash = provider
-                .get_block_by_number(BlockNumberOrTag::Number(block_number), false)
-                .await
-                .unwrap()
-                .unwrap()
-                .header
-                .hash;
-            let _ = audit_block(block_number, block_hash, metrics, client).await;
-        }
-    }))
-    .buffer_unordered(FUTURES_BUFFER_SIZE)
-    .collect::<Vec<_>>();
-    futures.await;
-    metrics.lock().unwrap().display_stats();
-    Ok(())
-}
-
-async fn audit_block(
-    block_number: u64,
-    hash: B256,
-    metrics: Arc<Mutex<Metrics>>,
-    client: HttpClient,
-) -> anyhow::Result<()> {
-    let header_by_hash_ck = HistoryContentKey::new_block_header_by_hash(hash);
-    let header_by_number_ck = HistoryContentKey::new_block_header_by_number(block_number);
-    let body_ck = HistoryContentKey::new_block_body(hash);
-    let receipts_ck = HistoryContentKey::new_block_receipts(hash);
-    match client.get_content(header_by_hash_ck).await {
-        Ok(_) => {
-            metrics.lock().unwrap().header_by_hash.success_count += 1;
-        }
-        Err(_) => {
-            warn!("Header by hash not found for block #{block_number} - {hash:?}");
-            metrics.lock().unwrap().header_by_hash.failure_count += 1;
-        }
-    }
-    match client.get_content(header_by_number_ck).await {
-        Ok(_) => {
-            metrics.lock().unwrap().header_by_number.success_count += 1;
-        }
-        Err(_) => {
-            warn!("Header by number not found for block #{block_number} - {hash:?}");
-            metrics.lock().unwrap().header_by_number.failure_count += 1;
-        }
-    }
-    match client.get_content(body_ck).await {
-        Ok(_) => {
-            metrics.lock().unwrap().block_body.success_count += 1;
-        }
-        Err(_) => {
-            warn!("Body not found for block #{block_number} - {hash:?}");
-            metrics.lock().unwrap().block_body.failure_count += 1;
-        }
-    }
-    match client.get_content(receipts_ck).await {
-        Ok(_) => {
-            metrics.lock().unwrap().receipts.success_count += 1;
-        }
-        Err(_) => {
-            warn!("Receipts not found for block #{block_number} - {hash:?}");
-            metrics.lock().unwrap().receipts.failure_count += 1;
-        }
-    }
-    Ok(())
-}
-
-// CLI Parameter Handling
-#[derive(Parser, Debug, PartialEq)]
-#[command(
-    name = "Sample Config",
-    about = "Script to sample random blocks from a specified range"
-)]
-pub struct SampleConfig {
-    #[arg(
-        long,
-        help = "Range to sample blocks from (shanghai, fourfours, since:123, latest:123, range:123-456)"
-    )]
-    pub range: SampleRange,
-
-    #[arg(long, help = "Number of blocks to sample")]
-    pub sample_size: usize,
-
-    #[arg(long, help = "ip address of node", default_value = DEFAULT_NODE_IP)]
-    pub node_ip: Url,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum SampleRange {
-    FourFours,
-    Shanghai,
-    Since(u64),
-    Latest(u64),
-    Range(u64, u64),
-}
-
-type ParseError = &'static str;
-
-impl FromStr for SampleRange {
-    type Err = ParseError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "fourfours" => Ok(Self::FourFours),
-            "shanghai" => Ok(Self::Shanghai),
-            val => {
-                let index = val.find(':').ok_or("Invalid sample range, missing `:`")?;
-                let (mode, val) = val.split_at(index);
-                let val = val.trim_start_matches(':');
-                match mode {
-                    "since" => {
-                        let block = val
-                            .parse::<u64>()
-                            .map_err(|_| "Invalid sample range: unable to parse block number")?;
-                        Ok(Self::Since(block))
-                    }
-                    "latest" => {
-                        let block = val
-                            .parse::<u64>()
-                            .map_err(|_| "Invalid sample range: unable to parse block number")?;
-                        Ok(Self::Latest(block))
-                    }
-                    "range" => {
-                        let index = val.find('-').ok_or("Invalid sample range, missing `-`")?;
-                        let (start, end) = val.split_at(index);
-                        let start = start.parse::<u64>().map_err(|_| {
-                            "Invalid sample range: unable to parse start block number"
-                        })?;
-                        let end = end.trim_start_matches('-').parse::<u64>().map_err(|_| {
-                            "Invalid sample range: unable to parse end block number"
-                        })?;
-                        Ok(Self::Range(start, end))
-                    }
-                    _ => Err("Invalid sample range: invalid mode"),
-                }
-            }
-        }
-    }
-}
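The sampling step in the deleted script reduces to shuffle-then-truncate. A standalone sketch of the same logic (assumes only the `rand` crate, which the script itself used):

```rust
use rand::seq::SliceRandom;

/// Mirrors the sampling above: collect the candidate block numbers,
/// shuffle them, and keep the first `sample_size`.
fn sample_blocks(start: u64, end: u64, sample_size: usize) -> Vec<u64> {
    let mut blocks: Vec<u64> = (start..end).collect();
    let sample_size = std::cmp::min(sample_size, blocks.len());
    blocks.shuffle(&mut rand::thread_rng());
    blocks[0..sample_size].to_vec()
}
```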
diff --git a/src/bin/test_providers.rs b/src/bin/test_providers.rs
deleted file mode 100644
index 3c866bb89..000000000
--- a/src/bin/test_providers.rs
+++ /dev/null
@@ -1,464 +0,0 @@
-use std::{fmt, fs, ops::Range, sync::Arc};
-
-use anyhow::{anyhow, Result};
-use clap::Parser;
-use ethportal_api::{
-    types::{
-        execution::accumulator::EpochAccumulator,
-        jsonrpc::{params::Params, request::JsonRequest},
-    },
-    utils::bytes::hex_encode,
-    Header,
-};
-use portal_bridge::{api::execution::ExecutionApi, constants::DEFAULT_TOTAL_REQUEST_TIMEOUT};
-use rand::{
-    distributions::{Distribution, Uniform},
-    thread_rng,
-};
-use reqwest::{
-    header::{HeaderMap, HeaderValue, CONTENT_TYPE},
-    Client,
-};
-use serde_json::json;
-use ssz::Decode;
-use tracing::{debug, info, warn};
-use trin_utils::log::init_tracing_logger;
-use trin_validation::{
-    accumulator::PreMergeAccumulator,
-    constants::{
-        BERLIN_BLOCK_NUMBER, BYZANTIUM_BLOCK_NUMBER, CONSTANTINOPLE_BLOCK_NUMBER, EPOCH_SIZE,
-        HOMESTEAD_BLOCK_NUMBER, ISTANBUL_BLOCK_NUMBER, LONDON_BLOCK_NUMBER, MERGE_BLOCK_NUMBER,
-        SHANGHAI_BLOCK_NUMBER,
-    },
-    header_validator::HeaderValidator,
-};
-use url::Url;
-
-lazy_static::lazy_static! {
-    static ref PANDAOPS_CLIENT_ID: String = std::env::var("PANDAOPS_CLIENT_ID").unwrap();
-    static ref PANDAOPS_CLIENT_SECRET: String = std::env::var("PANDAOPS_CLIENT_SECRET").unwrap();
-}
-
-// tldr:
-// Randomly samples X blocks from every hard fork range.
-// Validates that each provider is able to return valid
-// headers, receipts, and block bodies for each randomly sampled block.
-// Tested Providers:
-// - Infura
-// - PandaOps-Erigon
-// - PandaOps-Geth
-// - PandaOps-Archive
-//
-// cargo run --bin test_providers -- --sample-size 5
-//
-
-#[tokio::main]
-pub async fn main() -> Result<()> {
-    init_tracing_logger();
-    let config = ProviderConfig::parse();
-    let latest_block = get_latest_block_number().await?;
-    info!("Starting to test providers: latest block = {latest_block}");
-    let mut all_ranges = Ranges::into_vec(config.sample_size, latest_block);
-    let mut all_providers: Vec<Providers> = Providers::into_vec();
-    for provider in all_providers.iter_mut() {
-        info!("Testing Provider: {provider}");
-        let mut provider_failures = 0;
-        let client_url = provider.get_client_url();
-        let api = ExecutionApi {
-            primary: client_url.clone(),
-            fallback: client_url,
-            header_validator: HeaderValidator::default(),
-            request_timeout: DEFAULT_TOTAL_REQUEST_TIMEOUT,
-        };
-        for gossip_range in all_ranges.iter_mut() {
-            debug!("Testing range: {gossip_range:?}");
-            let mut range_failures = 0;
-            for block in gossip_range.blocks() {
-                debug!("Testing block: {block}");
-                let epoch_acc = match lookup_epoch_acc(*block) {
-                    Ok(epoch_acc) => epoch_acc,
-                    Err(msg) => {
-                        provider_failures += 3;
-                        range_failures += 3;
-                        warn!(
-                            "--- failed to build valid header, receipts, & block body for block: {block}: Invalid epoch acc: {msg}"
-                        );
-                        continue;
-                    }
-                };
-                let (full_header, _, _, _) = match api.get_header(*block, epoch_acc).await {
-                    Ok(header) => header,
-                    Err(_) => {
-                        provider_failures += 3;
-                        range_failures += 3;
-                        warn!("--- failed to build valid header, receipts, & block body for block: {block}");
-                        continue;
-                    }
-                };
-                if let Err(msg) = api.get_receipts(&full_header).await {
-                    provider_failures += 1;
-                    range_failures += 1;
-                    warn!("--- failed to build valid receipts for block: {block}: Error: {msg}");
-                };
-                if let Err(msg) = api.get_block_body(&full_header).await {
-                    provider_failures += 1;
-                    range_failures += 1;
-                    warn!("--- failed to build valid block body for block: {block}: Error: {msg}");
-                };
-            }
-            let total = config.sample_size * 3;
-            gossip_range.update_success_rate(range_failures, total as u64);
-            debug!(
-                "Provider: {provider:?} // Range: {gossip_range:?} // Failures: {range_failures}/{total}"
-            );
-        }
-        let total =
-            config.sample_size * Ranges::into_vec(config.sample_size, latest_block).len() * 3;
-        provider.update_success_rate(provider_failures, total as u64);
-        debug!("Provider Summary: {provider:?} // Failures: {provider_failures}/{total}");
-    }
-    info!("Range Summary:");
-    for range in all_ranges.iter() {
-        range.display_summary();
-    }
-    info!("Provider Summary:");
-    for provider in all_providers.iter() {
-        provider.display_summary();
-    }
-    info!("Finished testing providers");
-    Ok(())
-}
-
-fn lookup_epoch_acc(block: u64) -> Result<Option<Arc<EpochAccumulator>>> {
-    if block >= MERGE_BLOCK_NUMBER {
-        return Ok(None);
-    }
-    let epoch_index = block / EPOCH_SIZE;
-    let pre_merge_acc = PreMergeAccumulator::default();
-    let epoch_hash = pre_merge_acc.historical_epochs[epoch_index as usize];
-    let epoch_hash_pretty = hex_encode(epoch_hash);
-    let epoch_hash_pretty = epoch_hash_pretty.trim_start_matches("0x");
-    let epoch_acc_path =
-        format!("./portal-accumulators/bridge_content/0x03{epoch_hash_pretty}.portalcontent");
-    let local_epoch_acc = match fs::read(&epoch_acc_path) {
-        Ok(val) => EpochAccumulator::from_ssz_bytes(&val).map_err(|err| anyhow!("{err:?}"))?,
-        Err(_) => {
-            return Err(anyhow!(
-                "Unable to find local epoch acc at path: {epoch_acc_path:?}"
-            ))
-        }
-    };
-    Ok(Some(Arc::new(local_epoch_acc)))
-}
-
-// CLI Parameter Handling
-#[derive(Parser, Debug, PartialEq)]
-#[command(
-    name = "Provider Config",
-    about = "Script to test provider content building"
-)]
-pub struct ProviderConfig {
-    #[arg(
-        long,
-        help = "Number of samples to take for each range",
-        default_value = "5"
-    )]
-    pub sample_size: usize,
-}
-
-#[derive(Debug, PartialEq)]
-enum Ranges {
-    // each variant stores the randomly sampled blocks for its range,
-    // so that the same blocks are tested across the providers
-    Frontier((Vec<u64>, SuccessRate)),
-    Homestead((Vec<u64>, SuccessRate)),
-    Byzantium((Vec<u64>, SuccessRate)),
-    Constantinople((Vec<u64>, SuccessRate)),
-    Istanbul((Vec<u64>, SuccessRate)),
-    Berlin((Vec<u64>, SuccessRate)),
-    London((Vec<u64>, SuccessRate)),
-    Merge((Vec<u64>, SuccessRate)),
-    Shanghai((Vec<u64>, SuccessRate)),
-}
-
-impl fmt::Display for Ranges {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Ranges::Frontier(_) => write!(f, "Frontier"),
-            Ranges::Homestead(_) => write!(f, "Homestead"),
-            Ranges::Byzantium(_) => write!(f, "Byzantium"),
-            Ranges::Constantinople(_) => write!(f, "Constantinople"),
-            Ranges::Istanbul(_) => write!(f, "Istanbul"),
-            Ranges::Berlin(_) => write!(f, "Berlin"),
-            Ranges::London(_) => write!(f, "London"),
-            Ranges::Merge(_) => write!(f, "Merge"),
-            Ranges::Shanghai(_) => write!(f, "Shanghai"),
-        }
-    }
-}
-
-impl Ranges {
-    fn display_summary(&self) {
-        let success_rate = match self {
-            Ranges::Frontier((_, success_rate))
-            | Ranges::Homestead((_, success_rate))
-            | Ranges::Byzantium((_, success_rate))
-            | Ranges::Constantinople((_, success_rate))
-            | Ranges::Istanbul((_, success_rate))
-            | Ranges::Berlin((_, success_rate))
-            | Ranges::London((_, success_rate))
-            | Ranges::Merge((_, success_rate))
-            | Ranges::Shanghai((_, success_rate)) => success_rate,
-        };
-        info!(
-            "Range: {} // Failure Rate: {}/{}",
-            self, success_rate.failures, success_rate.total
-        );
-    }
-
-    fn blocks(&self) -> &Vec<u64> {
-        match self {
-            Ranges::Frontier((blocks, _))
-            | Ranges::Homestead((blocks, _))
-            | Ranges::Byzantium((blocks, _))
-            | Ranges::Constantinople((blocks, _))
-            | Ranges::Istanbul((blocks, _))
-            | Ranges::Berlin((blocks, _))
-            | Ranges::London((blocks, _))
-            | Ranges::Merge((blocks, _))
-            | Ranges::Shanghai((blocks, _)) => blocks,
-        }
-    }
-
-    fn update_success_rate(&mut self, failures: u64, total: u64) {
-        match self {
-            Ranges::Frontier((_, success_rate))
-            | Ranges::Homestead((_, success_rate))
-            | Ranges::Byzantium((_, success_rate))
-            | Ranges::Constantinople((_, success_rate))
-            | Ranges::Istanbul((_, success_rate))
-            | Ranges::Berlin((_, success_rate))
-            | Ranges::London((_, success_rate))
-            | Ranges::Merge((_, success_rate))
-            | Ranges::Shanghai((_, success_rate)) => {
-                success_rate.failures += failures;
-                success_rate.total += total;
-            }
-        }
-    }
-
-    pub fn into_vec(sample_size: usize, latest_block: u64) -> Vec<Ranges> {
-        let mut rng = thread_rng();
-        vec![
-            Ranges::Frontier((
-                Uniform::from(Range {
-                    start: 0,
-                    end: HOMESTEAD_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Homestead((
-                Uniform::from(Range {
-                    start: HOMESTEAD_BLOCK_NUMBER,
-                    end: BYZANTIUM_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Byzantium((
-                Uniform::from(Range {
-                    start: BYZANTIUM_BLOCK_NUMBER,
-                    end: CONSTANTINOPLE_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Constantinople((
-                Uniform::from(Range {
-                    start: CONSTANTINOPLE_BLOCK_NUMBER,
-                    end: ISTANBUL_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Istanbul((
-                Uniform::from(Range {
-                    start: ISTANBUL_BLOCK_NUMBER,
-                    end: BERLIN_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Berlin((
-                Uniform::from(Range {
-                    start: BERLIN_BLOCK_NUMBER,
-                    end: LONDON_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::London((
-                Uniform::from(Range {
-                    start: LONDON_BLOCK_NUMBER,
-                    end: MERGE_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Merge((
-                Uniform::from(Range {
-                    start: MERGE_BLOCK_NUMBER,
-                    end: SHANGHAI_BLOCK_NUMBER,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-            Ranges::Shanghai((
-                Uniform::from(Range {
-                    start: SHANGHAI_BLOCK_NUMBER,
-                    end: latest_block,
-                })
-                .sample_iter(&mut rng)
-                .take(sample_size)
-                .collect(),
-                SuccessRate::default(),
-            )),
-        ]
-    }
-}
-
-#[derive(Debug, PartialEq, Default)]
-struct SuccessRate {
-    failures: u64,
-    total: u64,
-}
-
-#[derive(Debug, PartialEq)]
-enum Providers {
-    PandaGeth(SuccessRate),
-    PandaErigon(SuccessRate),
-    PandaArchive(SuccessRate),
-    Infura(SuccessRate),
-}
-
-impl fmt::Display for Providers {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Providers::PandaGeth(_) => write!(f, "PandaGeth"),
-            Providers::PandaErigon(_) => write!(f, "PandaErigon"),
-            Providers::PandaArchive(_) => write!(f, "PandaArchive"),
-            Providers::Infura(_) => write!(f, "Infura"),
-        }
-    }
-}
-
-impl Providers {
-    fn display_summary(&self) {
-        let success_rate = match self {
-            Providers::PandaGeth(success_rate)
-            | Providers::PandaErigon(success_rate)
-            | Providers::PandaArchive(success_rate)
-            | Providers::Infura(success_rate) => success_rate,
-        };
-        info!(
-            "Provider: {} // Failure Rate: {:?} / {:?}",
-            self, success_rate.failures, success_rate.total
-        );
-    }
-
-    fn update_success_rate(&mut self, failures: u64, total: u64) {
-        match self {
-            Providers::PandaGeth(success_rate)
-            | Providers::PandaErigon(success_rate)
-            | Providers::PandaArchive(success_rate)
-            | Providers::Infura(success_rate) => {
-                success_rate.failures += failures;
-                success_rate.total += total;
-            }
-        }
-    }
-
-    fn into_vec() -> Vec<Providers> {
-        vec![
-            Providers::PandaGeth(SuccessRate::default()),
-            Providers::PandaErigon(SuccessRate::default()),
-            Providers::PandaArchive(SuccessRate::default()),
-            Providers::Infura(SuccessRate::default()),
-        ]
-    }
-
-    fn get_client_url(&self) -> Url {
-        match self {
-            Providers::Infura(_) => {
-                let infura_key = std::env::var("TRIN_INFURA_PROJECT_ID").unwrap();
-                Url::parse(&format!("https://mainnet.infura.io/v3/{}", infura_key)).unwrap()
-            }
-            _ => match self {
-                Providers::PandaGeth(_) => {
-                    Url::parse("https://geth-lighthouse.mainnet.eu1.ethpandaops.io/")
-                        .expect("to be able to parse static base el endpoint url")
-                }
-                Providers::PandaErigon(_) => {
-                    Url::parse("https://erigon-lighthouse.mainnet.eu1.ethpandaops.io/")
-                        .expect("to be able to parse static base el endpoint url")
-                }
-                Providers::PandaArchive(_) => Url::parse("https://archive.mainnet.ethpandaops.io/")
-                    .expect("to be able to parse static base el endpoint url"),
-                _ => panic!("not implemented"),
-            },
-        }
-    }
-}
-
-async fn get_latest_block_number() -> Result<u64> {
-    let mut headers = HeaderMap::new();
-    headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
-    headers.insert(
-        "CF-Access-Client-Id",
-        HeaderValue::from_str(&PANDAOPS_CLIENT_ID)
-            .map_err(|_| anyhow!("Invalid CF-Access-Client-Id header value"))?,
-    );
-    headers.insert(
-        "CF-Access-Client-Secret",
-        HeaderValue::from_str(&PANDAOPS_CLIENT_SECRET)
-            .map_err(|_| anyhow!("Invalid CF-Access-Client-Secret header value"))?,
-    );
-    let request = JsonRequest::new(
-        "eth_getBlockByNumber".to_string(),
-        Params::Array(vec![json!("latest"), json!(false)]),
-        /* id= */ 1,
-    );
-    let response = Client::new()
-        .post("https://geth-lighthouse.mainnet.eu1.ethpandaops.io/")
-        .headers(headers)
-        .json(&request)
-        .send()
-        .await
-        .map_err(|e| anyhow!("Request failed: {:?}", e))?;
-    let response = response
-        .json::<serde_json::Value>()
-        .await
-        .map_err(|e| anyhow!("Failed to read response text: {:?}", e))?;
-    let result = response
-        .get("result")
-        .ok_or_else(|| anyhow!("Unable to fetch latest block"))?;
-    let header: Header = serde_json::from_value(result.clone())?;
-    Ok(header.number)
-}
diff --git a/trin-storage/src/config.rs b/trin-storage/src/config.rs
index 07b3a6c49..69bd8364d 100644
--- a/trin-storage/src/config.rs
+++ b/trin-storage/src/config.rs
@@ -1,7 +1,7 @@
 use std::path::PathBuf;
 
 use discv5::enr::NodeId;
-use ethportal_api::types::{cli::StorageCapacityConfig, network::Subnetwork};
+use ethportal_api::types::network::Subnetwork;
 use r2d2::Pool;
 use r2d2_sqlite::SqliteConnectionManager;
 
@@ -9,6 +9,20 @@ use crate::{error::ContentStoreError, utils::setup_sql, DistanceFunction};
 
 const BYTES_IN_MB_U64: u64 = 1000 * 1000;
 
+/// The storage capacity configuration.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum StorageCapacityConfig {
+    Combined {
+        total_mb: u32,
+        subnetworks: Vec<Subnetwork>,
+    },
+    Specific {
+        beacon_mb: Option<u32>,
+        history_mb: Option<u32>,
+        state_mb: Option<u32>,
+    },
+}
+
 /// Factory for creating [PortalStorageConfig] instances
 pub struct PortalStorageConfigFactory {
     node_id: NodeId,
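A hedged sketch of how the relocated enum is constructed (the `Combined` form mirrors its use in the deleted purge script above; the values are illustrative):

```rust
use ethportal_api::types::network::Subnetwork;
use trin_storage::config::StorageCapacityConfig;

fn example_configs() -> (StorageCapacityConfig, StorageCapacityConfig) {
    // Combined: one capacity shared across the enabled subnetworks.
    let combined = StorageCapacityConfig::Combined {
        total_mb: 100,
        subnetworks: vec![Subnetwork::History],
    };
    // Specific: per-subnetwork capacities; `None` presumably means no
    // storage is configured for that subnetwork.
    let specific = StorageCapacityConfig::Specific {
        beacon_mb: None,
        history_mb: Some(100),
        state_mb: None,
    };
    (combined, specific)
}
```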
diff --git a/trin-storage/src/test_utils.rs b/trin-storage/src/test_utils.rs
index 228693665..4a1a29872 100644
--- a/trin-storage/src/test_utils.rs
+++ b/trin-storage/src/test_utils.rs
@@ -1,8 +1,11 @@
 use discv5::enr::NodeId;
-use ethportal_api::types::{cli::StorageCapacityConfig, network::Subnetwork};
+use ethportal_api::types::network::Subnetwork;
 use tempfile::TempDir;
 
-use crate::{error::ContentStoreError, PortalStorageConfig, PortalStorageConfigFactory};
+use crate::{
+    config::StorageCapacityConfig, error::ContentStoreError, PortalStorageConfig,
+    PortalStorageConfigFactory,
+};
 
 /// Creates temporary directory and PortalStorageConfig.
 pub fn create_test_portal_storage_config_with_capacity(
.expect("to be able to parse static base el endpoint url"), - _ => panic!("not implemented"), - }, - } - } -} - -async fn get_latest_block_number() -> Result { - let mut headers = HeaderMap::new(); - headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - headers.insert( - "CF-Access-Client-Id", - HeaderValue::from_str(&PANDAOPS_CLIENT_ID) - .map_err(|_| anyhow!("Invalid CF-Access-Client-Id header value"))?, - ); - headers.insert( - "CF-Access-Client-Secret", - HeaderValue::from_str(&PANDAOPS_CLIENT_SECRET) - .map_err(|_| anyhow!("Invalid CF-Access-Client-Secret header value"))?, - ); - let request = JsonRequest::new( - "eth_getBlockByNumber".to_string(), - Params::Array(vec![json!("latest"), json!(false)]), - /* id= */ 1, - ); - let response = Client::new() - .post("https://geth-lighthouse.mainnet.eu1.ethpandaops.io/") - .headers(headers) - .json(&request) - .send() - .await - .map_err(|e| anyhow!("Request failed: {:?}", e))?; - let response = response - .json::() - .await - .map_err(|e| anyhow!("Failed to read response text: {:?}", e))?; - let result = response - .get("result") - .ok_or_else(|| anyhow!("Unable to fetch latest block"))?; - let header: Header = serde_json::from_value(result.clone())?; - Ok(header.number) -} diff --git a/trin-storage/src/config.rs b/trin-storage/src/config.rs index 07b3a6c49..69bd8364d 100644 --- a/trin-storage/src/config.rs +++ b/trin-storage/src/config.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use discv5::enr::NodeId; -use ethportal_api::types::{cli::StorageCapacityConfig, network::Subnetwork}; +use ethportal_api::types::network::Subnetwork; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; @@ -9,6 +9,20 @@ use crate::{error::ContentStoreError, utils::setup_sql, DistanceFunction}; const BYTES_IN_MB_U64: u64 = 1000 * 1000; +/// The storage capacity configurtion. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageCapacityConfig { + Combined { + total_mb: u32, + subnetworks: Vec, + }, + Specific { + beacon_mb: Option, + history_mb: Option, + state_mb: Option, + }, +} + /// Factory for creating [PortalStorageConfig] instances pub struct PortalStorageConfigFactory { node_id: NodeId, diff --git a/trin-storage/src/test_utils.rs b/trin-storage/src/test_utils.rs index 228693665..4a1a29872 100644 --- a/trin-storage/src/test_utils.rs +++ b/trin-storage/src/test_utils.rs @@ -1,8 +1,11 @@ use discv5::enr::NodeId; -use ethportal_api::types::{cli::StorageCapacityConfig, network::Subnetwork}; +use ethportal_api::types::network::Subnetwork; use tempfile::TempDir; -use crate::{error::ContentStoreError, PortalStorageConfig, PortalStorageConfigFactory}; +use crate::{ + config::StorageCapacityConfig, error::ContentStoreError, PortalStorageConfig, + PortalStorageConfigFactory, +}; /// Creates temporary directory and PortalStorageConfig. 
diff --git a/trin-utils/src/cli.rs b/trin-utils/src/cli.rs
new file mode 100644
index 000000000..d60616bb0
--- /dev/null
+++ b/trin-utils/src/cli.rs
@@ -0,0 +1,74 @@
+use core::fmt;
+use std::{str::FromStr, sync::Arc};
+
+use alloy::primitives::B256;
+use ethportal_api::types::{
+    network::Subnetwork,
+    portal_wire::{NetworkSpec, ANGELFOOD, MAINNET},
+};
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum Web3TransportType {
+    HTTP,
+    IPC,
+}
+
+impl fmt::Display for Web3TransportType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::HTTP => write!(f, "http"),
+            Self::IPC => write!(f, "ipc"),
+        }
+    }
+}
+
+impl FromStr for Web3TransportType {
+    type Err = &'static str;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "http" => Ok(Web3TransportType::HTTP),
+            "ipc" => Ok(Web3TransportType::IPC),
+            _ => Err("Invalid web3-transport arg. Expected either 'http' or 'ipc'"),
+        }
+    }
+}
+
+pub fn check_private_key_length(private_key: &str) -> Result<B256, String> {
+    if private_key.len() == 66 {
+        return B256::from_str(private_key).map_err(|err| format!("HexError: {err}"));
+    }
+    Err(format!(
+        "Invalid private key length: {}, expected 66 (0x-prefixed 32 byte hexstring)",
+        private_key.len()
+    ))
+}
+
+pub fn network_parser(network_string: &str) -> Result<Arc<NetworkSpec>, String> {
+    match network_string {
+        "mainnet" => Ok(MAINNET.clone()),
+        "angelfood" => Ok(ANGELFOOD.clone()),
+        _ => Err(format!(
+            "Not a valid network: {network_string}, must be 'angelfood' or 'mainnet'"
+        )),
+    }
+}
+
+pub fn subnetwork_parser(subnetwork_string: &str) -> Result<Arc<Vec<Subnetwork>>, String> {
+    let subnetworks = subnetwork_string
+        .split(',')
+        .map(Subnetwork::from_cli_arg)
+        .collect::<Result<Vec<Subnetwork>, String>>()?;
+
+    if subnetworks.is_empty() {
+        return Err("At least one subnetwork must be enabled".to_owned());
+    }
+
+    for subnetwork in &subnetworks {
+        if !subnetwork.is_active() {
+            return Err(format!(
+                "{subnetwork} subnetwork has not yet been activated"
+            ));
+        }
+    }
+
+    Ok(Arc::new(subnetworks))
+}
diff --git a/trin-utils/src/lib.rs b/trin-utils/src/lib.rs
index 623e58449..d8a783d96 100644
--- a/trin-utils/src/lib.rs
+++ b/trin-utils/src/lib.rs
@@ -1,6 +1,7 @@
 #![warn(clippy::unwrap_used)]
 #![warn(clippy::uninlined_format_args)]
 
+pub mod cli;
 pub mod dir;
 pub mod log;
 pub mod submodules;
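A usage sketch for the new `trin-utils` CLI helpers, based only on the behavior visible in the added code above (the key value is illustrative):

```rust
use std::str::FromStr;

use trin_utils::cli::{check_private_key_length, network_parser, Web3TransportType};

fn main() {
    // A 0x-prefixed 32-byte hex string is 66 characters long.
    let key = format!("0x{}", "aa".repeat(32));
    assert!(check_private_key_length(&key).is_ok());
    assert!(check_private_key_length("0x1234").is_err());

    // Transport arg parsing accepts exactly "http" and "ipc".
    assert_eq!(Web3TransportType::from_str("ipc"), Ok(Web3TransportType::IPC));

    // Network parsing accepts exactly "mainnet" and "angelfood".
    assert!(network_parser("mainnet").is_ok());
    assert!(network_parser("goerli").is_err());
}
```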