From a935daebd56db21ea6dc6b1d41da3fd0a6e7bb8b Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 3 Oct 2023 03:37:12 +0000 Subject: [PATCH 01/15] Clean `bors.toml` (#4795) unblock https://github.com/sigp/lighthouse/pull/4755 Co-authored-by: Paul Hauner Co-authored-by: realbigsean --- bors.toml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/bors.toml b/bors.toml index 9e633d63f57..3782ef4db30 100644 --- a/bors.toml +++ b/bors.toml @@ -1,5 +1,4 @@ status = [ - "cargo-fmt", "release-tests-ubuntu", "release-tests-windows", "debug-tests-ubuntu", @@ -9,20 +8,15 @@ status = [ "eth1-simulator-ubuntu", "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", - "check-benchmarks", - "clippy", - "arbitrary-check", - "cargo-audit", "cargo-udeps", "beacon-chain-tests", "op-pool-tests", "doppelganger-protection-test", "execution-engine-integration-ubuntu", - "cargo-vendor", "check-msrv", "slasher-tests", "syncing-simulator-ubuntu", - "compile-with-beta-compiler" + "compile-with-beta-compiler", ] use_squash_merge = true timeout_sec = 10800 From 1c98806b6fe78b06bd83fda1f94b8c232ad79091 Mon Sep 17 00:00:00 2001 From: Jack McPherson Date: Tue, 3 Oct 2023 04:57:20 +0000 Subject: [PATCH 02/15] Allow libp2p to determine listening addresses (#4700) ## Issue Addressed #4675 ## Proposed Changes - Update local ENR (**only port numbers**) with local addresses received from libp2p (via `SwarmEvent::NewListenAddr`) - Only use the zero port for CLI tests ## Additional Info ### See Also ### - #4705 - #4402 - #4745 --- beacon_node/lighthouse_network/src/config.rs | 13 ++- .../lighthouse_network/src/discovery/mod.rs | 109 +++++++++++++++--- lighthouse/tests/beacon_node.rs | 93 +++++++++------ 3 files changed, 158 insertions(+), 57 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index c3f6b60b045..c5077448823 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -16,6 +16,11 @@ use std::sync::Arc; use std::time::Duration; use types::{ForkContext, ForkName}; +pub const DEFAULT_IPV4_ADDRESS: Ipv4Addr = Ipv4Addr::UNSPECIFIED; +pub const DEFAULT_TCP_PORT: u16 = 9000u16; +pub const DEFAULT_DISC_PORT: u16 = 9000u16; +pub const DEFAULT_QUIC_PORT: u16 = 9001u16; + /// The cache time is set to accommodate the circulation time of an attestation. 
/// /// The p2p spec declares that we accept attestations within the following range: @@ -304,10 +309,10 @@ impl Default for Config { .expect("The total rate limit has been specified"), ); let listen_addresses = ListenAddress::V4(ListenAddr { - addr: Ipv4Addr::UNSPECIFIED, - disc_port: 9000, - quic_port: 9001, - tcp_port: 9000, + addr: DEFAULT_IPV4_ADDRESS, + disc_port: DEFAULT_DISC_PORT, + quic_port: DEFAULT_QUIC_PORT, + tcp_port: DEFAULT_TCP_PORT, }); let discv5_listen_config = diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 4d8807336bf..77fba905660 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -21,10 +21,11 @@ pub use libp2p::identity::{Keypair, PublicKey}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use futures::prelude::*; use futures::stream::FuturesUnordered; +use libp2p::multiaddr::Protocol; use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::THandlerInEvent; pub use libp2p::{ - core::{ConnectedPoint, Multiaddr}, + core::{transport::ListenerId, ConnectedPoint, Multiaddr}, identity::PeerId, swarm::{ dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler, @@ -77,6 +78,19 @@ pub struct DiscoveredPeers { pub peers: HashMap>, } +/// Specifies which port numbers should be modified after start of the discovery service +#[derive(Debug)] +pub struct UpdatePorts { + /// TCP port associated wih IPv4 address (if present) + pub tcp4: bool, + /// TCP port associated wih IPv6 address (if present) + pub tcp6: bool, + /// QUIC port associated wih IPv4 address (if present) + pub quic4: bool, + /// QUIC port associated wih IPv6 address (if present) + pub quic6: bool, +} + #[derive(Clone, PartialEq)] struct SubnetQuery { subnet: Subnet, @@ -177,12 +191,8 @@ pub struct Discovery { /// always false. pub started: bool, - /// This keeps track of whether an external UDP port change should also indicate an internal - /// TCP port change. As we cannot detect our external TCP port, we assume that the external UDP - /// port is also our external TCP port. This assumption only holds if the user has not - /// explicitly set their ENR TCP port via the CLI config. The first indicates tcp4 and the - /// second indicates tcp6. - update_tcp_port: (bool, bool), + /// Specifies whether various port numbers should be updated after the discovery service has been started + update_ports: UpdatePorts, /// Logger for the discovery behaviour. log: slog::Logger, @@ -300,10 +310,12 @@ impl Discovery { } } - let update_tcp_port = ( - config.enr_tcp4_port.is_none(), - config.enr_tcp6_port.is_none(), - ); + let update_ports = UpdatePorts { + tcp4: config.enr_tcp4_port.is_none(), + tcp6: config.enr_tcp6_port.is_none(), + quic4: config.enr_quic4_port.is_none(), + quic6: config.enr_quic6_port.is_none(), + }; Ok(Self { cached_enrs: LruCache::new(50), @@ -314,7 +326,7 @@ impl Discovery { discv5, event_stream, started: !config.disable_discovery, - update_tcp_port, + update_ports, log, enr_dir, }) @@ -1006,8 +1018,8 @@ impl NetworkBehaviour for Discovery { // Discv5 will have updated our local ENR. We save the updated version // to disk. 
- if (self.update_tcp_port.0 && socket_addr.is_ipv4()) - || (self.update_tcp_port.1 && socket_addr.is_ipv6()) + if (self.update_ports.tcp4 && socket_addr.is_ipv4()) + || (self.update_ports.tcp6 && socket_addr.is_ipv6()) { // Update the TCP port in the ENR self.discv5.update_local_enr_socket(socket_addr, true); @@ -1036,12 +1048,79 @@ impl NetworkBehaviour for Discovery { FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => { self.on_dial_failure(peer_id, error) } + FromSwarm::NewListenAddr(ev) => { + let addr = ev.addr; + let listener_id = ev.listener_id; + + trace!(self.log, "Received NewListenAddr event from swarm"; "listener_id" => ?listener_id, "addr" => ?addr); + + let mut addr_iter = addr.iter(); + + let attempt_enr_update = match addr_iter.next() { + Some(Protocol::Ip4(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic4 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + Some(Protocol::Ip6(_)) => match (addr_iter.next(), addr_iter.next()) { + (Some(Protocol::Tcp(port)), None) => { + if !self.update_ports.tcp6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_tcp_port(port) + } + (Some(Protocol::Udp(port)), Some(Protocol::QuicV1)) => { + if !self.update_ports.quic6 { + debug!(self.log, "Skipping ENR update"; "multiaddr" => ?addr); + return; + } + + self.update_enr_quic_port(port) + } + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (unsupported transport)"; "addr" => ?addr); + return; + } + }, + _ => { + debug!(self.log, "Encountered unacceptable multiaddr for listening (no IP)"; "addr" => ?addr); + return; + } + }; + + let local_enr: Enr = self.discv5.local_enr(); + + match attempt_enr_update { + Ok(_) => { + info!(self.log, "Updated local ENR"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()) + } + Err(e) => warn!(self.log, "Failed to update ENR"; "error" => ?e), + } + } FromSwarm::ConnectionEstablished(_) | FromSwarm::ConnectionClosed(_) | FromSwarm::AddressChange(_) | FromSwarm::ListenFailure(_) | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) | FromSwarm::ExpiredListenAddr(_) | FromSwarm::ListenerError(_) | FromSwarm::ListenerClosed(_) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index bc6b6284e5a..4140a3f6b42 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -22,9 +22,14 @@ use types::{ Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, ProgressiveBalancesMode, }; -use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; +const DUMMY_ENR_TCP_PORT: u16 = 7777; +const DUMMY_ENR_UDP_PORT: u16 = 8888; +const DUMMY_ENR_QUIC_PORT: u16 = 9999; + +const _: () = + assert!(DUMMY_ENR_QUIC_PORT != 0 && DUMMY_ENR_TCP_PORT != 0 && 
DUMMY_ENR_UDP_PORT != 0); /// Returns the `lighthouse beacon_node` command. fn base_cmd() -> Command { @@ -1004,7 +1009,7 @@ fn network_listen_address_flag_wrong_double_v6_value_config() { } #[test] fn network_port_flag_over_ipv4() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("port", Some(port.to_string().as_str())) .run() @@ -1021,7 +1026,7 @@ fn network_port_flag_over_ipv4() { } #[test] fn network_port_flag_over_ipv6() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(port.to_string().as_str())) @@ -1039,8 +1044,8 @@ fn network_port_flag_over_ipv6() { } #[test] fn network_port_and_discovery_port_flags_over_ipv4() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; CommandLineTest::new() .flag("port", Some(tcp4_port.to_string().as_str())) .flag("discovery-port", Some(disc4_port.to_string().as_str())) @@ -1058,8 +1063,8 @@ fn network_port_and_discovery_port_flags_over_ipv4() { } #[test] fn network_port_and_discovery_port_flags_over_ipv6() { - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("port", Some(tcp6_port.to_string().as_str())) @@ -1078,10 +1083,10 @@ fn network_port_and_discovery_port_flags_over_ipv6() { } #[test] fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1113,12 +1118,12 @@ fn network_port_and_discovery_port_flags_over_ipv4_and_ipv6() { #[test] fn network_port_discovery_quic_port_flags_over_ipv4_and_ipv6() { - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); - let disc4_port = unused_udp4_port().expect("Unable to find unused port."); - let quic4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); - let disc6_port = unused_udp6_port().expect("Unable to find unused port."); - let quic6_port = unused_udp6_port().expect("Unable to find unused port."); + let tcp4_port = 0; + let disc4_port = 0; + let quic4_port = 0; + let tcp6_port = 0; + let disc6_port = 0; + let quic6_port = 0; CommandLineTest::new() .flag("listen-address", Some("::1")) .flag("listen-address", Some("127.0.0.1")) @@ -1264,7 +1269,8 @@ fn network_load_flag() { // Tests for ENR flags. 
#[test] fn enr_udp_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; + assert!(port != 0); CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1272,7 +1278,7 @@ fn enr_udp_port_flag() { } #[test] fn enr_quic_port_flag() { - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1280,7 +1286,7 @@ fn enr_quic_port_flag() { } #[test] fn enr_tcp_port_flag() { - let port = unused_tcp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1288,7 +1294,7 @@ fn enr_tcp_port_flag() { } #[test] fn enr_udp6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-udp6-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1296,7 +1302,7 @@ fn enr_udp6_port_flag() { } #[test] fn enr_quic6_port_flag() { - let port = unused_udp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_QUIC_PORT; CommandLineTest::new() .flag("enr-quic6-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1304,7 +1310,7 @@ fn enr_quic6_port_flag() { } #[test] fn enr_tcp6_port_flag() { - let port = unused_tcp6_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_TCP_PORT; CommandLineTest::new() .flag("enr-tcp6-port", Some(port.to_string().as_str())) .run_with_zero_port() @@ -1313,8 +1319,11 @@ fn enr_tcp6_port_flag() { #[test] fn enr_match_flag_over_ipv4() { let addr = "127.0.0.2".parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some("127.0.0.2")) @@ -1338,8 +1347,11 @@ fn enr_match_flag_over_ipv4() { fn enr_match_flag_over_ipv6() { const ADDR: &str = "::1"; let addr = ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(ADDR)) @@ -1362,13 +1374,18 @@ fn enr_match_flag_over_ipv6() { #[test] fn enr_match_flag_over_ipv4_and_ipv6() { const IPV6_ADDR: &str = "::1"; + + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). 
+ let udp6_port = DUMMY_ENR_UDP_PORT; + let tcp6_port = DUMMY_ENR_TCP_PORT; let ipv6_addr = IPV6_ADDR.parse::().unwrap(); - let udp6_port = unused_udp6_port().expect("Unable to find unused port."); - let tcp6_port = unused_tcp6_port().expect("Unable to find unused port."); + const IPV4_ADDR: &str = "127.0.0.1"; + // the reason we use the ENR dummy values is because, due to the nature of the `--enr-match` flag, these will eventually become ENR ports (as well as listening ports). + let udp4_port = DUMMY_ENR_UDP_PORT; + let tcp4_port = DUMMY_ENR_TCP_PORT; let ipv4_addr = IPV4_ADDR.parse::().unwrap(); - let udp4_port = unused_udp4_port().expect("Unable to find unused port."); - let tcp4_port = unused_tcp4_port().expect("Unable to find unused port."); + CommandLineTest::new() .flag("enr-match", None) .flag("listen-address", Some(IPV4_ADDR)) @@ -1406,7 +1423,7 @@ fn enr_match_flag_over_ipv4_and_ipv6() { #[test] fn enr_address_flag_with_ipv4() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1419,7 +1436,7 @@ fn enr_address_flag_with_ipv4() { #[test] fn enr_address_flag_with_ipv6() { let addr = "192.167.1.1".parse::().unwrap(); - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("192.167.1.1")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1433,7 +1450,7 @@ fn enr_address_flag_with_ipv6() { fn enr_address_dns_flag() { let addr = Ipv4Addr::LOCALHOST; let ipv6addr = Ipv6Addr::LOCALHOST; - let port = unused_udp4_port().expect("Unable to find unused port."); + let port = DUMMY_ENR_UDP_PORT; CommandLineTest::new() .flag("enr-address", Some("localhost")) .flag("enr-udp-port", Some(port.to_string().as_str())) @@ -1482,8 +1499,8 @@ fn http_address_ipv6_flag() { } #[test] fn http_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("http", None) .flag("http-port", Some(port1.to_string().as_str())) @@ -1639,8 +1656,8 @@ fn metrics_address_ipv6_flag() { } #[test] fn metrics_port_flag() { - let port1 = unused_tcp4_port().expect("Unable to find unused port."); - let port2 = unused_tcp4_port().expect("Unable to find unused port."); + let port1 = 0; + let port2 = 0; CommandLineTest::new() .flag("metrics", None) .flag("metrics-port", Some(port1.to_string().as_str())) From 8a1b77bf8956656cc585c2bd3079729f27aaa594 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 3 Oct 2023 06:33:15 +0000 Subject: [PATCH 03/15] Ultra Fast Super Slick CI (#4755) Attempting to improve our CI speeds as its recently been a pain point. Major changes: - Use a github action to pull stable/nightly rust rather than building it each run - Shift test suite to `nexttest` https://github.com/nextest-rs/nextest for CI UPDATE: So I've iterated on some changes, and although I think its still not optimal I think this is a good base to start from. Some extra things in this PR: - Shifted where we pull rust from. We're now using this thing: https://github.com/moonrepo/setup-rust . It's got some interesting cache's built in, but was not seeing the gains that Jimmy managed to get. 
In either case tho, it can pull rust, cargofmt, clippy, cargo nexttest all in < 5s. So I think it's worthwhile. - I've grouped a few of the check-like tests into a single test called `code-test`. Although we were using github runners in parallel which may be faster, it just seems wasteful. There were like 4-5 tests, where we would pull lighthouse, compile it, then run an action, like clippy, cargo-audit or fmt. I've grouped these into a single action, so we only compile lighthouse once, then in each step we run the checks. This avoids compiling lighthouse like 5 times. - Ive made doppelganger tests run on our local machines to avoid pulling foundry, building and making lcli which are all now baked into the images. - We have sccache and do not incremental compile lighthouse Misc bonus things: - Cargo update - Fix web3 signer openssl keys which is required after a cargo update - Use mock_instant in an LRU cache test to avoid non-deterministic test - Remove race condition in building web3signer tests There's still some things we could improve on. Such as downloading the EF tests every run and the web3-signer binary, but I've left these to be out of scope of this PR. I think the above are meaningful improvements. Co-authored-by: Paul Hauner Co-authored-by: realbigsean Co-authored-by: antondlr --- .config/nextest.toml | 113 +++++++ .github/workflows/test-suite.yml | 281 ++++++++++-------- Cargo.lock | 254 +++++++--------- Makefile | 37 ++- bors.toml | 1 + common/lru_cache/Cargo.toml | 3 + common/lru_cache/src/time.rs | 14 +- .../src/per_block_processing/tests.rs | 2 +- testing/web3signer_tests/tls/generate.sh | 4 +- .../web3signer_tests/tls/lighthouse/cert.pem | 52 ++-- .../web3signer_tests/tls/lighthouse/key.key | 100 +++---- .../web3signer_tests/tls/lighthouse/key.p12 | Bin 4371 -> 4371 bytes .../tls/lighthouse/web3signer.pem | 52 ++-- .../web3signer_tests/tls/web3signer/cert.pem | 52 ++-- .../web3signer_tests/tls/web3signer/key.key | 100 +++---- .../web3signer_tests/tls/web3signer/key.p12 | Bin 4371 -> 4371 bytes .../tls/web3signer/known_clients.txt | 2 +- validator_client/slashing_protection/Makefile | 17 +- .../slashing_protection/tests/interop.rs | 7 +- 19 files changed, 616 insertions(+), 475 deletions(-) create mode 100644 .config/nextest.toml diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 00000000000..b701259fc2a --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,113 @@ +# This is the default config used by nextest. It is embedded in the binary at +# build time. It may be used as a template for .config/nextest.toml. + +[store] +# The directory under the workspace root at which nextest-related files are +# written. Profile-specific storage is currently written to dir/. +dir = "target/nextest" + +# This section defines the default nextest profile. Custom profiles are layered +# on top of the default profile. +[profile.default] +# "retries" defines the number of times a test should be retried. If set to a +# non-zero value, tests that succeed on a subsequent attempt will be marked as +# non-flaky. Can be overridden through the `--retries` option. +# Examples +# * retries = 3 +# * retries = { backoff = "fixed", count = 2, delay = "1s" } +# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } +retries = 0 + +# The number of threads to run tests with. Supported values are either an integer or +# the string "num-cpus". Can be overridden through the `--test-threads` option. 
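+# Examples (illustrative values only, mirroring the "retries" examples above;
+# not part of the setting's default below):
+# * test-threads = 4
+# * test-threads = "num-cpus"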
+test-threads = "num-cpus" + +# The number of threads required for each test. This is generally used in overrides to +# mark certain tests as heavier than others. However, it can also be set as a global parameter. +threads-required = 1 + +# Show these test statuses in the output. +# +# The possible values this can take are: +# * none: no output +# * fail: show failed (including exec-failed) tests +# * retry: show flaky and retried tests +# * slow: show slow tests +# * pass: show passed tests +# * skip: show skipped tests (most useful for CI) +# * all: all of the above +# +# Each value includes all the values above it; for example, "slow" includes +# failed and retried tests. +# +# Can be overridden through the `--status-level` flag. +status-level = "pass" + +# Similar to status-level, show these test statuses at the end of the run. +final-status-level = "flaky" + +# "failure-output" defines when standard output and standard error for failing tests are produced. +# Accepted values are +# * "immediate": output failures as soon as they happen +# * "final": output failures at the end of the test run +# * "immediate-final": output failures as soon as they happen and at the end of +# the test run; combination of "immediate" and "final" +# * "never": don't output failures at all +# +# For large test suites and CI it is generally useful to use "immediate-final". +# +# Can be overridden through the `--failure-output` option. +failure-output = "immediate" + +# "success-output" controls production of standard output and standard error on success. This should +# generally be set to "never". +success-output = "never" + +# Cancel the test run on the first failure. For CI runs, consider setting this +# to false. +fail-fast = true + +# Treat a test that takes longer than the configured 'period' as slow, and print a message. +# See for more information. +# +# Optional: specify the parameter 'terminate-after' with a non-zero integer, +# which will cause slow tests to be terminated after the specified number of +# periods have passed. +# Example: slow-timeout = { period = "60s", terminate-after = 2 } +slow-timeout = { period = "120s" } + +# Treat a test as leaky if after the process is shut down, standard output and standard error +# aren't closed within this duration. +# +# This usually happens in case of a test that creates a child process and lets it inherit those +# handles, but doesn't clean the child process up (especially when it fails). +# +# See for more information. +leak-timeout = "100ms" + +[profile.default.junit] +# Output a JUnit report into the given file inside 'store.dir/'. +# If unspecified, JUnit is not written out. + +# path = "junit.xml" + +# The name of the top-level "report" element in JUnit report. If aggregating +# reports across different test runs, it may be useful to provide separate names +# for each report. +report-name = "lighthouse-run" + +# Whether standard output and standard error for passing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +store-success-output = false + +# Whether standard output and standard error for failing tests should be stored in the JUnit report. +# Output is stored in the and elements of the element. +# +# Note that if a description can be extracted from the output, it is always stored in the +# element. +store-failure-output = true + +# This profile is activated if MIRI_SYSROOT is set. +[profile.default-miri] +# Miri tests take up a lot of memory, so only run 1 test at a time by default. 
+test-threads = 4 diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index fd9b77ae2f9..a296cc8491c 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -18,14 +18,14 @@ env: # Deny warnings in CI # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" - # The Nightly version used for cargo-udeps, might need updating from time to time. - PINNED_NIGHTLY: nightly-2023-04-16 # Prevent Github API rate limiting. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Self-hosted runners need to reference a different host for `./watch` tests. WATCH_HOST: ${{ github.repository == 'sigp/lighthouse' && 'host.docker.internal' || 'localhost' }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 jobs: target-branch-check: name: target-branch-check @@ -34,145 +34,176 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - extract-msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Extract Minimum Supported Rust Version (MSRV) - run: | - metadata=$(cargo metadata --no-deps --format-version 1) - msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "MSRV=$msrv" >> $GITHUB_OUTPUT - id: extract_msrv - outputs: - MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - cargo-fmt: - name: cargo-fmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Check formatting with cargo fmt - run: make cargo-fmt release-tests-ubuntu: name: release-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats release-tests-windows: name: release-tests-windows runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }} - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Install make + if: env.SELF_HOSTED_RUNNERS == 'false' run: choco install -y make - - uses: KyleMayes/install-llvm-action@v1 - if: env.SELF_HOSTED_RUNNERS == false - with: - version: "15.0" - directory: ${{ runner.temp }}/llvm +# - uses: KyleMayes/install-llvm-action@v1 +# if: env.SELF_HOSTED_RUNNERS == 'false' +# with: +# version: "15.0" +# directory: ${{ runner.temp }}/llvm - name: Set LIBCLANG_PATH run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release - run: make test-release + run: make nextest-release + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats beacon-chain-tests: name: beacon-chain-tests # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats op-pool-tests: name: op-pool-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run operation_pool tests for all known forks run: make test-op-pool slasher-tests: name: slasher-tests runs-on: ubuntu-latest - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run slasher tests for all supported backends run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu # Use self-hosted runners only on the sigp repo. runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + bins: cargo-nextest - name: Install Foundry (anvil) + if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug - run: make test-debug + run: make nextest-debug + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run state_transition_vectors in release. run: make run-state-transition-tests ef-tests-ubuntu: name: ef-tests-ubuntu # Use self-hosted runners only on the sigp repo. 
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} - needs: cargo-fmt + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == false - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest - name: Run consensus-spec-tests with blst, milagro and fake_crypto - run: make test-ef + run: make nextest-ef + - name: Show cache stats + if: env.SELF_HOSTED_RUNNERS == 'true' + run: sccache --show-stats dockerfile-ubuntu: name: dockerfile-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - name: Build the root Dockerfile run: docker build --build-arg FEATURES=portable -t lighthouse:local . - name: Test the built image @@ -180,11 +211,13 @@ jobs: eth1-simulator-ubuntu: name: eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -194,11 +227,13 @@ jobs: merge-transition-ubuntu: name: merge-transition-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -208,21 +243,25 @@ jobs: no-eth1-simulator-ubuntu: name: no-eth1-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Run the beacon chain sim without an eth1 connection run: cargo run --release --bin simulator no-eth1-sim syncing-simulator-ubuntu: name: syncing-simulator-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install Foundry (anvil) uses: foundry-rs/foundry-toolchain@v1 with: @@ -231,21 +270,27 @@ jobs: run: cargo run --release --bin simulator syncing-sim doppelganger-protection-test: name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt + runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + if: env.SELF_HOSTED_RUNNERS == 'false' + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release - name: Install geth + if: env.SELF_HOSTED_RUNNERS == 'false' run: | sudo add-apt-repository -y ppa:ethereum/ethereum sudo apt-get update sudo apt-get install ethereum - - name: Install lighthouse and lcli + - name: Install lighthouse run: | make - make install-lcli + - name: Install lcli + if: env.SELF_HOSTED_RUNNERS == 'false' + run: make install-lcli - name: Run the doppelganger protection failure test script run: | cd 
scripts/tests @@ -257,89 +302,71 @@ jobs: execution-engine-integration-ubuntu: name: execution-engine-integration-ubuntu runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: '1.20' - - uses: actions/setup-dotnet@v3 - with: - dotnet-version: '6.0.201' - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Run exec engine integration tests in release run: make test-exec-engine - check-benchmarks: - name: check-benchmarks + check-code: + name: check-code runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck benchmark code without running it - run: make check-benches - clippy: - name: clippy - runs-on: ubuntu-latest - needs: cargo-fmt + env: + CARGO_INCREMENTAL: 1 steps: - uses: actions/checkout@v3 - name: Get latest version of stable Rust - run: rustup update stable + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + components: rustfmt,clippy + bins: cargo-audit + - name: Check formatting with cargo fmt + run: make cargo-fmt - name: Lint code for quality and style with Clippy run: make lint - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock + - name: Typecheck benchmark code without running it + run: make check-benches + - name: Validate state_processing feature arbitrary-fuzz + run: make arbitrary-fuzz + - name: Run cargo audit + run: make audit-CI + - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose + run: CARGO_HOME=$(readlink -f $HOME) make vendor check-msrv: name: check-msrv runs-on: ubuntu-latest - needs: [cargo-fmt, extract-msrv] steps: - uses: actions/checkout@v3 - - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Rust at Minimum Supported Rust Version (MSRV) + run: | + metadata=$(cargo metadata --no-deps --format-version 1) + msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + rustup override set $msrv - name: Run cargo check run: cargo check --workspace - arbitrary-check: - name: arbitrary-check - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Validate state_processing feature arbitrary-fuzz - run: make arbitrary-fuzz - cargo-audit: - name: cargo-audit - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - run: make audit - cargo-vendor: - name: cargo-vendor - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v3 - - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose - run: CARGO_HOME=$(readlink -f $HOME) make vendor cargo-udeps: name: cargo-udeps runs-on: ubuntu-latest - needs: cargo-fmt steps: - uses: actions/checkout@v3 - - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - run: rustup toolchain install $PINNED_NIGHTLY - - name: Install 
cargo-udeps - run: cargo install cargo-udeps --locked --force + - name: Get latest version of nightly Rust + uses: moonrepo/setup-rust@v1 + with: + channel: nightly + bins: cargo-udeps + cache: false + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config diff --git a/Cargo.lock b/Cargo.lock index 90a5373dbb5..2a8fbdd74ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2135563fb5c609d2b2b87c1e8ce7bc41b0b45430fa9661f457981503dd5bf0" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] @@ -225,15 +225,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" -[[package]] -name = "array-init" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayref" version = "0.3.7" @@ -578,7 +569,7 @@ dependencies = [ "slog", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "store", @@ -903,7 +894,7 @@ dependencies = [ "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tree_hash", ] @@ -1138,9 +1129,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -1400,9 +1391,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.0" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622178105f911d937a42cdb140730ba4a3ed2becd8ae6ce39c7d28b5d75d4588" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" dependencies = [ "cfg-if", "cpufeatures", @@ -1538,7 +1529,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.8", + "tokio-util 0.7.9", ] [[package]] @@ -1633,9 +1624,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d98235fdc2f355d330a8244184ab6b4b33c28679c0b4158f63138e51d6cf7e88" +checksum = "53c8a2cb22327206568569e5a45bb5a2c946455efdd76e24d15b7e82171af95e" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -1647,9 +1638,9 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", @@ -1758,7 +1749,7 @@ dependencies = [ "aes-gcm", "arrayvec", "delay_map", - "enr 0.9.0", + "enr 0.9.1", "fnv", "futures", "hashlink 0.7.0", @@ -1772,7 +1763,7 @@ dependencies = [ "parking_lot 
0.11.2", "rand", "rlp", - "smallvec 1.11.0", + "smallvec", "socket2 0.4.9", "tokio", "tracing", @@ -1846,7 +1837,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" dependencies = [ - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "ed25519", "rand_core 0.6.4", "serde", @@ -1961,9 +1952,9 @@ dependencies = [ [[package]] name = "enr" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be7b2ac146c1f99fe245c02d16af0696450d8e06c135db75e10eeb9e642c20d" +checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" dependencies = [ "base64 0.21.4", "bytes", @@ -1974,7 +1965,6 @@ dependencies = [ "rand", "rlp", "serde", - "serde-hex", "sha3 0.10.8", "zeroize", ] @@ -2401,7 +2391,7 @@ checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types 0.14.1", "itertools", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -2665,9 +2655,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -3106,7 +3096,7 @@ dependencies = [ "indexmap 1.9.3", "slab", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tracing", ] @@ -3236,9 +3226,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -3711,7 +3701,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] @@ -4047,9 +4037,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.40.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef7dd7b09e71aac9271c60031d0e558966cdb3253ba0308ab369bb2de80630d0" +checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713" dependencies = [ "either", "fnv", @@ -4067,7 +4057,7 @@ dependencies = [ "quick-protobuf", "rand", "rw-stream-sink", - "smallvec 1.11.0", + "smallvec", "thiserror", "unsigned-varint 0.7.2", "void", @@ -4084,7 +4074,7 @@ dependencies = [ "libp2p-identity", "log", "parking_lot 0.12.1", - "smallvec 1.11.0", + "smallvec", "trust-dns-resolver", ] @@ -4115,7 +4105,7 @@ dependencies = [ "rand", "regex", "sha2 0.10.7", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", "void", ] @@ -4137,7 +4127,7 @@ dependencies = [ "lru 0.10.1", "quick-protobuf", "quick-protobuf-codec", - "smallvec 1.11.0", + "smallvec", "thiserror", "void", ] @@ -4178,7 +4168,7 @@ dependencies = [ "libp2p-swarm", "log", "rand", - "smallvec 1.11.0", + "smallvec", "socket2 0.5.4", "tokio", "trust-dns-proto", @@ -4216,7 +4206,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.12.1", "rand", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", ] @@ -4227,7 +4217,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "71ce70757f2c0d82e9a3ef738fb10ea0723d16cec37f078f719e2c247704c1bb" dependencies = [ "bytes", - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "futures", "libp2p-core", "libp2p-identity", @@ -4286,9 +4276,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.43.3" +version = "0.43.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28016944851bd73526d3c146aabf0fa9bbe27c558f080f9e5447da3a1772c01a" +checksum = "f0cf749abdc5ca1dce6296dc8ea0f012464dfcfd3ddd67ffc0cabd8241c4e1da" dependencies = [ "either", "fnv", @@ -4302,7 +4292,7 @@ dependencies = [ "multistream-select", "once_cell", "rand", - "smallvec 1.11.0", + "smallvec", "tokio", "void", ] @@ -4525,7 +4515,7 @@ dependencies = [ "slog", "slog-async", "slog-term", - "smallvec 1.11.0", + "smallvec", "snap", "ssz_types", "strum", @@ -4672,6 +4662,7 @@ name = "lru_cache" version = "0.1.0" dependencies = [ "fnv", + "mock_instant", ] [[package]] @@ -4724,22 +4715,17 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "md-5" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ + "cfg-if", "digest 0.10.7", ] @@ -4815,7 +4801,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] @@ -4915,6 +4901,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "mock_instant" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" + [[package]] name = "monitoring_api" version = "0.1.0" @@ -5036,7 +5028,7 @@ dependencies = [ "futures", "log", "pin-project", - "smallvec 1.11.0", + "smallvec", "unsigned-varint 0.7.2", ] @@ -5161,7 +5153,7 @@ dependencies = [ "slog-term", "sloggers", "slot_clock", - "smallvec 1.11.0", + "smallvec", "ssz_types", "store", "strum", @@ -5223,12 +5215,6 @@ dependencies = [ "validator_dir", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -5289,7 +5275,7 @@ dependencies = [ "num-traits", "rand", "serde", - "smallvec 1.11.0", + "smallvec", "zeroize", ] @@ -5329,7 +5315,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.3", "libc", ] @@ -5444,9 +5430,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.1.3+3.1.2" +version = "300.1.5+3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" +checksum = "559068e4c12950d7dcaa1857a61725c0d38d4fc03ff8e070ab31a75d6e316491" dependencies = [ "cc", ] @@ -5560,9 +5546,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" [[package]] name = "parking_lot" @@ -5595,7 +5581,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.11.0", + "smallvec", "winapi", ] @@ -5608,7 +5594,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.3.5", - "smallvec 1.11.0", + "smallvec", "windows-targets 0.48.5", ] @@ -6275,9 +6261,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -6285,14 +6271,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] @@ -6413,7 +6397,7 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tower-service", "url", "wasm-bindgen", @@ -6533,7 +6517,7 @@ dependencies = [ "fallible-streaming-iterator", "hashlink 0.8.4", "libsqlite3-sys", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -6602,9 +6586,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" dependencies = [ "bitflags 2.4.0", "errno", @@ -6636,9 +6620,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ "ring", "untrusted", @@ -6818,9 +6802,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" dependencies = [ "serde", ] @@ -6848,17 +6832,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hex" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca37e3e4d1b39afd7ff11ee4e947efae85adfddf4841787bfa47c470e96dc26d" -dependencies = [ - "array-init", - "serde", - "smallvec 0.6.14", -] - [[package]] name = "serde_array_query" version = "0.1.0" @@ -6979,9 +6952,9 @@ dependencies = [ [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -7303,18 +7276,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "snap" @@ -7331,7 +7295,7 @@ dependencies = [ "aes-gcm", "blake2", "chacha20poly1305", - "curve25519-dalek 4.1.0", + "curve25519-dalek 4.1.1", "rand_core 0.6.4", "ring", "rustc_version", @@ -7423,7 +7387,7 @@ dependencies = [ "itertools", "serde", "serde_derive", - "smallvec 1.11.0", + "smallvec", "tree_hash", "typenum", ] @@ -7448,7 +7412,7 @@ dependencies = [ "merkle_proof", "rayon", "safe_arith", - "smallvec 1.11.0", + "smallvec", "ssz_types", "tokio", "tree_hash", @@ -7559,7 +7523,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "smallvec 1.11.0", + "smallvec", "syn 1.0.109", ] @@ -7706,9 +7670,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if", - "fastrand 2.0.0", + "fastrand 2.0.1", "redox_syscall 0.3.5", - "rustix 0.38.13", + "rustix 0.38.14", "windows-sys 0.48.0", ] @@ -7725,9 +7689,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] @@ -7814,9 +7778,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -7829,15 +7793,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -7978,7 +7942,7 @@ dependencies = [ "rand", "socket2 0.5.4", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "whoami", ] @@ -8001,7 +7965,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util 0.7.8", + "tokio-util 0.7.9", ] [[package]] @@ -8022,9 +7986,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", @@ -8172,7 +8136,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec 1.11.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -8206,7 +8170,7 @@ checksum = "5c998ac5fe2b07c025444bdd522e6258110b63861c6698eedc610c071980238d" dependencies = [ "ethereum-types 0.14.1", "ethereum_hashing", - "smallvec 1.11.0", + "smallvec", ] [[package]] @@ -8247,7 +8211,7 @@ dependencies = [ "ipnet", "lazy_static", "rand", - "smallvec 1.11.0", + "smallvec", "socket2 0.4.9", "thiserror", "tinyvec", @@ -8269,7 +8233,7 @@ dependencies = [ "lru-cache", "parking_lot 0.12.1", "resolv-conf", - "smallvec 1.11.0", + "smallvec", "thiserror", "tokio", "tracing", @@ -8328,7 +8292,7 @@ dependencies = [ "serde_with", "serde_yaml", "slog", - "smallvec 1.11.0", + "smallvec", "ssz_types", "state_processing", "strum", @@ -8392,9 +8356,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -8599,9 +8563,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" @@ -8646,7 +8610,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tokio-util 0.7.8", + "tokio-util 0.7.9", "tower-service", "tracing", ] @@ -8898,9 +8862,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -9190,9 +9154,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.18" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab77e97b50aee93da431f2cee7cd0f43b4d1da3c408042f2d7d164187774f0a" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" [[package]] name = "xmltree" diff --git a/Makefile b/Makefile index 7bed5732b6a..8f744e03c5a 100644 --- a/Makefile +++ b/Makefile @@ -108,11 +108,21 @@ build-release-tarballs: test-release: cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher +# Runs the full workspace tests in **release**, without downloading any additional +# test vectors, using nextest. +nextest-release: + cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. 
test-debug: cargo test --workspace --exclude ef_tests --exclude beacon_chain +# Runs the full workspace tests in **debug**, without downloading any additional test +# vectors, using nextest. +nextest-debug: + cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain + # Runs cargo-fmt (linter). cargo-fmt: cargo fmt --all -- --check @@ -129,25 +139,33 @@ run-ef-tests: cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests +# Runs EF test vectors with nextest +nextest-run-ef-tests: + rm -rf $(EF_TESTS)/.accessed_file_log.txt + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto" + cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro" + ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests + # Run the tests in the `beacon_chain` crate for all known forks. test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) test-beacon-chain-%: - env FORK_NAME=$* cargo test --release --features fork_from_env,slasher/lmdb -p beacon_chain + env FORK_NAME=$* cargo nextest run --release --features fork_from_env,slasher/lmdb -p beacon_chain # Run the tests in the `operation_pool` crate for all known forks. test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) test-op-pool-%: - env FORK_NAME=$* cargo test --release \ + env FORK_NAME=$* cargo nextest run --release \ --features 'beacon_chain/fork_from_env'\ -p operation_pool # Run the tests in the `slasher` crate for all supported database backends. test-slasher: - cargo test --release -p slasher --features lmdb - cargo test --release -p slasher --no-default-features --features mdbx - cargo test --release -p slasher --features lmdb,mdbx # both backends enabled + cargo nextest run --release -p slasher --features lmdb + cargo nextest run --release -p slasher --no-default-features --features mdbx + cargo nextest run --release -p slasher --features lmdb,mdbx # both backends enabled # Runs only the tests/state_transition_vectors tests. run-state-transition-tests: @@ -156,6 +174,9 @@ run-state-transition-tests: # Downloads and runs the EF test vectors. test-ef: make-ef-tests run-ef-tests +# Downloads and runs the EF test vectors with nextest. +nextest-ef: make-ef-tests nextest-run-ef-tests + # Runs tests checking interop between Lighthouse and execution clients. test-exec-engine: make -C $(EXECUTION_ENGINE_INTEGRATION) test @@ -205,8 +226,12 @@ arbitrary-fuzz: cargo check -p slashing_protection --features arbitrary-fuzz # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) -audit: +audit: install-audit audit-CI + +install-audit: cargo install --force cargo-audit + +audit-CI: cargo audit # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
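The `nextest-*` targets added in the Makefile hunk above mirror the existing `cargo test` targets but delegate to cargo-nextest, which has to be installed separately. A minimal local invocation might look like the following sketch; it assumes `cargo-nextest` is not yet installed and uses only the target names defined in this Makefile.

```bash
# One-time install of the nextest runner (assumed not yet present locally).
cargo install cargo-nextest --locked

# Workspace tests in debug and release, mirroring `make test-debug` / `make test-release`.
make nextest-debug
make nextest-release

# Download the EF test vectors and run them through nextest.
make nextest-ef
```

As with the existing targets, `nextest-ef` fetches the consensus-spec vectors via `make-ef-tests` before `nextest-run-ef-tests` executes them.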
diff --git a/bors.toml b/bors.toml index 3782ef4db30..e821b89a813 100644 --- a/bors.toml +++ b/bors.toml @@ -8,6 +8,7 @@ status = [ "eth1-simulator-ubuntu", "merge-transition-ubuntu", "no-eth1-simulator-ubuntu", + "check-code", "cargo-udeps", "beacon-chain-tests", "op-pool-tests", diff --git a/common/lru_cache/Cargo.toml b/common/lru_cache/Cargo.toml index 73c623ed443..c1bd15f9f8b 100644 --- a/common/lru_cache/Cargo.toml +++ b/common/lru_cache/Cargo.toml @@ -6,3 +6,6 @@ edition = { workspace = true } [dependencies] fnv = { workspace = true } + +[dev-dependencies] +mock_instant = "0.3" diff --git a/common/lru_cache/src/time.rs b/common/lru_cache/src/time.rs index 966741ca4dd..0b2fd835687 100644 --- a/common/lru_cache/src/time.rs +++ b/common/lru_cache/src/time.rs @@ -1,7 +1,13 @@ //! This implements a time-based LRU cache for fast checking of duplicates use fnv::FnvHashSet; +#[cfg(test)] +use mock_instant::Instant; use std::collections::VecDeque; -use std::time::{Duration, Instant}; + +#[cfg(not(test))] +use std::time::Instant; + +use std::time::Duration; struct Element { /// The key being inserted. @@ -222,16 +228,16 @@ mod test { cache.insert("a"); cache.insert("b"); - std::thread::sleep(Duration::from_millis(20)); + mock_instant::MockClock::advance(Duration::from_millis(20)); cache.insert("a"); // a is newer now - std::thread::sleep(Duration::from_millis(85)); + mock_instant::MockClock::advance(Duration::from_millis(85)); assert!(cache.contains(&"a"),); // b was inserted first but was not as recent it should have been removed assert!(!cache.contains(&"b")); - std::thread::sleep(Duration::from_millis(16)); + mock_instant::MockClock::advance(Duration::from_millis(16)); assert!(!cache.contains(&"a")); } } diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 16fa2462f5e..df5aa9f7a60 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,4 +1,4 @@ -#![cfg(all(test, not(feature = "fake_crypto")))] +#![cfg(all(test, not(feature = "fake_crypto"), not(debug_assertions)))] use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f00e7b7e37a..f918e87cf82 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,7 @@ #!/bin/bash openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && +openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 
-inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 5746d19a165..24b0a2e5c0e 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUELASgYwStCn/u/8tPByRADyCwLEwDQYJKoZIhvcNAQEL +MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMjAzMDA1N1oYDzIxMjMwODI5MDMwMDU3 +VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCc -i30cib5B/B5QNd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvc -u98m1Dnib1Z+eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPp -lZ1n/9EyxXUwD5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL -+ThON3j5rIT+nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kX -ZtPlV5V2nilAzVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dp -hkLG8fZXKMkMzKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRY -aOXZX4BAiQO2Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLz -nXVGoSsYapttiiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1Eoycix -dZogKz0QVCz4++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj -88A7lS8DzXBt/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Dj -c5XVgNHjV4TdX3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABo1QwUjALBgNVHQ8E +BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 +R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 +aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE +jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw +Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe +V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI +0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM +VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr +67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f +kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa +3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf +TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQUoeeF4G1qTRzLvO583qitbNDzr10wDQYJKoZIhvcNAQELBQADggIBAA9Y -YZP0pZLyovSnjyyuTR4KE9B+TSwqHe/LvH+7EAXLH+cwhyS7ADfJyt3mOCbKHZSo -dmJ5KWQ6M2Xn9Wq40BPk8mQPmAxy0nHg5beG03HYXOIsK8zgXTMad1+D1jnHPAda -ldXJ2Y+ljx4TDXKCWpTaq1+flqgRD3t98tOLuiULZ5jsTFX8Xbun7matcjziU5Lo -GWVQPWkb8Vx+3QyfbfiYJ7hggfYTxQsVJOXKuD8k2FMtKn5oTp3VwD2kY1q2X2Yk -HsDZJdYrvjWi2LcZDKoSNeusuLrv1XoUnwsAa3ng6drvoEU16vfILLYqH820UJ61 -/fFm3a9BFHRvPVd/WcSeIVc9jx9+32RIVxlppwCINnGMGE20kUZxu0TiMjTX9bCp -AouDuhwMt7z5jiQIi/CMxN6IlHBeVLqyK8ayWvH40xYgZTXlePpmLcQhcieNk7oJ -ard9jMfj4JhH5GbLXVptMBVJ0f9Ql4rW3EyNipvVKdkgTUNIeVm7LyUK220aT7ty -a0pGWHHViiF1MjGExo0P3gjZIML32TjZWlG3Nts5NAiyXDo4f78VeLyZQ7efVkub -GpjMf89vrmPdQhssoFr8fRFQObDe7hgxkgeiw9jgHItJl2/MWAxfsHV18HwiBqGW -QzaZR995YhU480jvA5XR8+EB6QUZeCEKunW8WK/F +HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO +8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE +x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI 
+dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h +vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ +js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 +0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP +sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ +1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex +XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI +SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 +nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index 91bee6a783a..d00b6c21229 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCci30cib5B/B5Q -Nd8grzi4LxmlyfZFi3VfpukwdwOD1Xk3ODk1OtjAzhK46YhDclvcu98m1Dnib1Z+ -eTjRuEEoekIxz2+BbOle7G52LNvuDZpD+HKucqIU3TnEKPPuTYPplZ1n/9EyxXUw -D5uTkn7xXzK8UFXUt73j6I6VFMdHlNcwLcx8KSwBDzvnGT4ew/UL+ThON3j5rIT+ -nFHDcC2zoM+6ANdVkL6GHid4/cOcYW6GxB9TRZtEasqze41bC+kXZtPlV5V2nilA -zVj8z9ynwBpHkLH+E6sMUhSEwA++QfI1gGf0FmSBgSIZ3RdPo/dphkLG8fZXKMkM -zKkRm5hcstDP6DnTIYl+CfuVez5gZ0/yelAqXNvTqMKuDhHTTRRYaOXZX4BAiQO2 -Q6a6WYLe87E2ka5AF2T2y/BPeXjUwDS/1mFIB3FUGlMLVJt8/RLznXVGoSsYaptt -iiPucQbMPEysCJ4/LZ9zxe3EDWWjpurLHGi/Y/dVziEvg1EoycixdZogKz0QVCz4 -++QI0kPDDX7So7CWni2JJuYguF/8CX8QbCT2L8jXf0uQrq76FLKj88A7lS8DzXBt -/pRryiIlDyLenJwHmrv6p+P/FYvgnJHvAEtTynxYm5GA16YWy+Djc5XVgNHjV4Td -X3GueAp+NhBBaHDFvYCbP/oXkRvNRQIDAQABAoICACCSBxxeblblQVtX8g4nVso/ -hnsPi61JiEi3/hGG2ZTe4AMEsCZqkXmABrYxZJf/3awN7K5z/n0lxB25VACScQAe -e9JIQf9wLRgCYjM1PycG7n9Q3G9+S0nDA4dUK/h7aUQ6zE68k4aYPbsbrDdmhgHr -WC+FGW6SMjCOjMfo1FOI3MLZ7I8ys8Seqkx5XIrjI4NzvWrMsN9lrSAaXwqmNuQG -Q+ID1cmoPXPDJ1xNlBrfzLK+cHQPafAwte7k+HKmhj9HtjOj5uWQn62ra+Xhy5ud -ZPpZ2Savaem81CcQnNXte5r1Fevbktq9Bt7RuM1ppIrwk8k3w5S72CTRGiYfNPJV -M1RMp46GrXVJdmx3k9LQfKdT6Gv9xTJXYQl7jN0+4uZ7QrVQHpcMpxPsATl+cQQH -wzCTbj2Oqn/30KqkZLyueN2MalRP8mVSe5nD+vvGb/sWLs52kp6QvHdlXER2RBFk -tJ5cGi+vgueoukb+qatiAE2y5MxYCqD02ShGcLos/SUQThRhL+iD8t0h+FoPTD5y -eTNZ85hF1HdypH1If8/YGETg55+fHYUAtYGT6R8lYeFMvBC05suorLBciXShOGuY -4zBbt32fPlsXlLneAtAAFv2BiJMt0TQavWHITLInFW1/aMHDV4/Pq69sRZuHdRaW -XFXD8CjnPUS5tBMQOqYhAoIBAQDLqUo7v3SpIstXmyU7BfUBuTYGS7MzjMhDxFUl -HvmbVZlOXhnPb3p4mW/XHrah9CjFBLJt3CF+PP/njwMw0YtPxCQpQwj0pI8CuveE -4Puq2wEfxVg+JKh1xidNj8230/WINzwfLCVfco7KKmjQX0MgMGaANQ0sGnt/r1eB -MwpY5uID+D5PORXUcHxBWlsVLyzZ9ZqKhAgewr3i7BLX2y7nwqEGlWTt1lxzZGCR -a8NZIAIs3qGzAgtm7O3hMz6XZulVyVSrMmmT8qXT4Lo1nW/9J6slV7Wdp9f++mr9 -m2vwrpJtmdPcA+YKPVgoFlKmZpZZbVvd+4uy8ksoxs1/cF7VAoIBAQDExnLQplq2 -BPoxEDAnlS+8Jju5en5Pk70HOfpQGUa4/6vY60x/N5sJqc6CcDySkkxRI8jLzMTe -AE9jqM+Z39MvGCH+SF9EPRopbAJIrcChXfvk2Imp7PLFRGrEBju63nQfaHdcefFy -Ia7RA8SCHLCReRdqPjSXbPAYPZK84vVNSfhrkytA4FJnaojvaqJqLQH9vB7CXv18 -Fu6w5fnrgARIoBhy2mb0QWzgd9JMsVDgS5XyX/4HBUejjXDdmLosOZ4PJ0GM2+tr -ilO/9KKhV9lqH7DcFgJBNkgVKRD1Ijr21yyOkttB5PULzaTQhzqkorGkWgyTzLWn -ksqOr2cWt0yxAoIBAElIazvAkpvht0WYv/bTF+w81uHBD4R3HgC0fubSlIJ+dKGs -XqEzvd/zZjkEBjeUga8TF5lMYojoLjUGUuGYJQbYzyJBytEs/YDAAhzAUA6Uq3zh -J/WEf1GRscbI/f8tt+YB6hJVckU8FYFNbVW9UYwdnmR3snuyM8ooL9Z/pTOEMMO4 -6cLcCazdpPhnKOsghIURSUCabcmTzXv/8m/VoLUoZYTW8PBb9/xVnCH3ot1JFT9M -BOdCzxOEIbytEdKs5z1FKsBHbZIc9+qbrKVqN0fipETVoLZQFPrc5O7IpDiAuJPT -jFZY2MfKdxRFpAvYUjVvkmT4BLapVL4hewRmTNkCggEBAKuJP8/KJSulvSEGNqRa -9kjzn376XKAsb02caixnTHK7Vuh7fq0sIThUUhT9mKBBbswRANtAv6Gz7YE4SPmf 
-1+6nAAM2ve2zwlm3sWoAJfvF/W+qoJ+EUsJK+TO3J1yozdwPanYwS52t5UKKIUU3 -k2jNge75GUmkCs1m58NHqoXc5PWKTtt4cf17LrJfaARdBe5Wjw3sVtdU+nE1mh+E -8rcI8Sc2Yyes3Sf07Fw0+wb8fVPUAJPIM4JNK8XRfQJOnA4jr44GrPyLkqS0sw0p -kvtjcv75JLAKjN39da3sUDCctVf4h7Cy0jee5n1uVV3uAiP+6BX0D6tsWK34FEsG -MZECggEBAIi/sjZNQjplD5zOULEWL8W6b+3CZymR5Qqa0brlx1Lz8h/daIITIFvm -bue/CjIht/oRGLVE8yzw2ojLf424h3h5PjmXMBNHlVkWQXfn6xCI8MjfZ71uA39O -RVCXAYwcghOWZL4Fkz+XQmIOdJ1OPXfU0py943joYZbgXXAYOc/zNylo9j7+bqDK -vLtFd4IIQoRzjsY//FoAuAditf4xDRqLwOh4amboZw1Qmn6bwDnCaKsFmA3o5BYR -4aRUm1dEbZgPtm2tuHQpEKuOPhWHroi3NsEdbhoyy3IUe0c3w4YGgnuvVy616wkV -GlPvUaKC1KX0CX1qT1anVZq9bSMTG+M= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF +vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA +61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE +t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI +pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA +H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q +ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq +zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r +dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 +bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H +NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 +55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk +pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs +MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp +VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD +wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U +XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f +3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm +FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL +JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo +HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj +CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY +zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf +twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc +k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL +xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ +QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 +nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa +6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S +O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf ++e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr +bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp +NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW +RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG +lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr +UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE +xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf +I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR +y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa +fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ +9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch +IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv +Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft +Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c 
+GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 +LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts +ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T +NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh +HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg +vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index d96ab477574f923701367fcd1ed4c5f6b1c45b35..73468fa084b6f5f1b036afd643967e361d004fc4 100644 GIT binary patch delta 4151 zcmV-75XkS7B9kJJYY3+c@CmIO+JBQI2O)nOCL5WWw@>l1cs+C1yB|h?f(9Vl2k-tE z7qeWxNtDd#Lpe*u5FLG`k>LsXadsNJl=ZK1cqbEbRIN$U?K_VtR3ks4949hH1r{z? z#kDB?yg0I{p(VeO2^ahb-IP00ixRw+D#3|4w~{Jy!Ry{?!px2@!Ij&L&#ycnUf-^%Ahp^9SKD8eAQmRzBo|^xD;&eFapvc44GeFe2Kku)8 zhxrGU0MHwmx;mQFzk*LlSs?EKQ?T8>JtoDE$0_iA;-WnF66NOtqjcELqsgR%5sG}G z7Zz|hN=G`KL>h9BSqvGGPG!u>W`BQDLK%=vMQ~t@

~Ek)Hr}-m+R={=UR-7z<4> zIn^A6el1KUz=8A`i8F$MXkc(OENVAQSP_B6TFphO8wI;MS)^g_!eHy9M`0Gq2xorb z<7tuD;4F~7`~3w1G$sQldMR*FUPTE{Z7_nQ980JAe=E5wG?jQ|+Da3EkrRK%9O8@6 zK)m9TZa*iIsPB#y!c*!4G|juiopPKsK$23`#_Y>~k48=`1?U`%Z4@A6$7yGksn?yci&=`qm^KaTWhBi_!@%Xzb+h*bEQrlSO=ZX79l_rT(0%S4Kc-D}p zq#o53^k9tGYKdOCGLclZ?FoN6E_8}cWKF&<1<;2!_}rSm5{0UO9rb!Bm%_x6p-Fm2V^Bu z0?VZ>Lobv}UGQ4H1LG>^VH{b6Y-FawFG?huE776>thiXE%$1Msa6f-nVmF;VBX-aQ z8|}2`5zlO1GXw|cHxl#?n^``tsof=+*;B{6z+9z6Glp22(#=+qe_r${ks^}6!8oV; z!@iH?{T~5ZkkHo?AXN#d8HEl6X!yWq*dr8CoAI{2$5~UX#fu5uW)!--Vf}xFO>LA4 z`I9X-?9SIsP4_gMLDYX+s=gM>ZZ*(UFH<$#A~w@Y;t^7;_HY4x9?ss6e47;{k>rLh zJw`#OJC^6j?vu^v{hJ6Z^HcNPSj_7`$4a4(j9#_3qCppe57K|uc!vU&S!$zv{uqQ{ zK}P?2Ai`(3*fKSVdmRI9g;$IQma@2Ni~08~t;n&H96AAtS`TI&mhRl|+e<9#Xkp%F zjQ};H0-fl!N=I8*cqiK}y-VcCR=xu|7Vo$w<2~LI4^K zR}6m%OzU;^2{j`rNrQPi@jsfViQE{!U4y?GRj9RYRn<&15$1|Kd}bZp@OOnLAFMzb zKD|*YJB*8M#TuT@b{vxq4@PWR8s7V^|I0t+!O{4~19g8JyWE&8_6okDpVHvMD5=Pk zw%kqhhO>lnG(7%bq}HS>{e1g@&#cvtw$M&WYwflH|9Vp}P?QPX zRl>!x0V$L9opI6uIPp3K&jU>i#cq{8l^mW5+R@iJSNN#!MA>k%IV>&F@lZv|RL~v& z`d>fd!~+0gAS=bR*xCfTJ{gW-p~`V7{{8oB$BTc317L0mf7AY+ZdV-w(1@D0YUI}4 z@-`@3A@Nl=LkvsH2?C}B+efez-kfvZDPA8J@C1Sh zP<(1HGr6Gk(~-_?A}KjOd1`(hW1e4IGEc-vteTs%?PUW4zh7?j1{b=U*2(vlM$@t$ zpt|hm_#cP)ne2{g2%*O>^6U)qyW9tIFj9KUNG@S~DlZswgs#tHF(ErJ+5{y6Eu_3Z zEBO@ie|5&|6NI5!=bUUSLgQ)j%{nVd$VW1pg=Fe{*-OHMdX^Q-@d%S`Zu$dsq@)WR zLyA~SWtZmMo?E9phFdIo0cwQLwO*z16RpoE9yf<&wF}`U#~k3FIA4|>%;&B`g?e~iFPV>(oxDs?Mvjm48-ub7>?#i>v6MFQ%* z&_!RYA3-S_fS>=Kw~x4GXELiAv=w-DZxTx zrA4C&XcMOL4lXU5FlHvQ%&bRGMyM7;bCHUq-rf*saHs-~l#QdS{b%w=t)TtIe~><$ z_gly^hf&S_(pi?ZyTF|oY3n5ojR^IJX|9JEFp4_7va)347!o;|_F${U@)AjU5QdmM zG_w}oL70ORJ|sp96PO1S#(KUMzZGB8FYr6grhBFzv1yY|j|;W90q}zJvbp;{kejp| zL1&~`qlnYMp817>j5+5tu>}oof1EdBjK6oDc^;dWt@1F`gEO$8j+cJ?*_PKoUq=&m z1JXmj6=QT5PifaH`3;1cQfgLl-!k;#@eaIvxeml|wS0acqy*36s-1%159SPUvAADt z#;h=q(nZNUG{#)<;o9uP$~{{Y``UkM_#>WNVLZyLagu1zt$A(s1>E9Me_`-rKpwbz zg-f=nAbman0`{%}g593z!QbEzCn}pR@R1GJC`QL6A8O&w9dUvIa)*6DLnsbMg2lk0 zv#E}E4-KZT>HOEo&`C@-VMeA}%d6O!FY0*)iAf%_zFf`jWA2W{HfK5}NcoYhH zurOO%P<5A;KXO@4EmTMa#XPm!%11E0TrRCcSKr{nXqdw+9I!##z~%z5mK1Wa^=GZI zN0uF^x3h`7Oi#fpsuk=jg6So$cr$!SYs+OPqB0)UW`+F$+S`f!e`HYFeJX4UbKp>u zl^UO35oC|h)PrEJd4R1c642SHi`==XxFiuK+hxx=hO`E1$2AfFsItQZW`&=D$12k> zoAU-I!I1lDCn*)MC5VHHD89)GhN>ej&G5DNck~EmaNkos%wf_5rAp|a`GNwUf_n3s zRfeKK(F8Vxnp+Z@;H3RND7*w{3yrJU7KiTlE~wd-0RhYVB?>m-3Is#@vVNUMagDGO zg?m;AbHrF5U((-3@e?)}I@lH172kLF-|bVv88K-OIy7wQf0F#BRt^fl5~CTu%QXqw zlFh)*ZsU?L9J-`@#^1KjM_&q<-g1#5KKw;n9Og%dX#!4(%ZxmGs> zzSC*48;X9;RUXjRZ4zAfs`&!Ik%L?ZtZi~J}@~f-G z>+x^AoKWKN3MJtSyNe~1&*#)0a!pOgwhJvB*(-^f_& zS_J+9TG-*tT7Ad&g6ao+0EN6?H^vclr|qD(x?P#t!eKFdadkw&nm-fB88mM}uJXB4 zq28106|K^T1$02v1GSAo137EQhH_jYU|7O}8e7bH#szi_PO$yW%)#rm~rS`ZrnFZIa69`hV5o41WHZFN|bd)vjWfiPI{lY8zpJ=N{2g91i( ze^y4$S*`254U_PU*Gh^sye{*f!Oq=4k&@&6>Q5$q>4MM7ENYRyD(oagxir=b@T@wg zKxZ8z6-6Fh{{-hal$uA~;kgwIP?;1pGE=)cO}{h9gtDWeJ>}%)^5g>AgK*%aPis1I zsU8s*&H}A~4ZHwY>VDYM{PBnULw0CWGbYf4+1O zFb^#D9gU!}mqSw)@lhLMFYLk`%!^mK%QVMzK?gBS>E{pl6E~@3qBfg_R zj5H90AmgGneTm&MB`_lf2`Yw29)|Y~0SOf`76cS~cu7^M1-2D*uCik+(p{nN3QPEt z&=3_a<6x54D-^b|p0EkXq~!Bf#ovEcKt*6Ugrp(Vvzj9%1PBopH@9g$uKxl82mm(y B^Kk$G delta 4151 zcmV-75XkS7B9kJJYX}f^Yp1Hb7~hj52O)nsmp_ixrd^X5GQ^=~=6;%hf(9UtMyoGxKo1$ z?jJSQcNpe~Y0=Y5=KTa?QEb`@$6cDa8@)h4PM=bv0c~gFD9?0rg1rA1o}Bf=A^CqX z1?of8AC$=4MoEN`x)D4qz&DqByKD-NO3%>a^95JN4uqy0r-p2?Pe(tw7*k85T#`m4s zzPUiq``kMeOmbfeS=U&1Z0@&pzbJp_ZPD+4;xhiVw=Fb^7kO#mdHB1xX5LnW?BFn~ zvF2Cw^AR$2&XpK_R^^^D9Tfdr1NgARMJh+E>4XgBiXNfsC>hT@U9LojSw1Q2qL+?R zU&!wqd0tpElSbp1P6sXl;_(j1!JWeXKA_1~gbEw~b#kBVE7i?kfiu!n9WZ}4IVe8Kl 
zl^xuUM#xY{+(oYYz6aa?5`jr+J{P<<$3@V(&nuR3`4xNYv5+Z1wXrh>7 zt$a8#S4?u}*HiV4_LvEczWjgREF09{$51@-$z}c@b2f!`{}vh`LzlwTZn9UHbonNH zy7`jWA9M$NlDh~ zL*$E2fitR4RYht%{2wXZ21ax|E8`6SN{8=BW8OpA2!V|^XIg_^ORa#Dchj41l&u4? zvl6m2a}`hub{RzV@Y;Xa{K_DzVS@Io@HIIpRNiWsnqU_v7=hL|QeAbyXq(EBPvobq zJ2H|l&7#{jCjo2tV1h)wxgCtQ*JtIr9`HKQ%gh&PEn(G*;L7KrD}Bn!Z>Q=ReXJ?) zLX`L3l12V~hr|CJZ%KP!vWZ0^)xXxa5j(h1nvZ z0VX9lU$}B#CU!O#HL7>zdGJ6AHQ6`W6qhpaEgXYAAs%I}Co z8hhJ(Q^bEVU%+r`hQG^IME45JHxMj3LZK+SB$0?8-UfT_sT``De5dPZqX{CeMm(#^ zI+o>@(!=Y5g1mw`NA4%hSZ-2*#})dBUDKbZk481Ak{eiTnJ#u=`BJP18fu06d%OEmyMD8*i{h zDnfLHoxf|Taju{u$2kXrYicd;l-gx;M(hyE*QI*BiB`icT;X{)NIW!?igZ)=j`h`J?|!G(kxA$H65N!MKrouqCvn?4NDT`~#5fV*XMsB`DM zl>(?zEeYbtUd3s*u4E&{;h81CkyuS!rgE>I8d;dhqPL@BttinEZV~WoRAwrYwxdtZ zMKtnYf3jK~-D7Yj7-21oKZWFYDj2(bN>hU(|0sMUJmTLhO?i>~`yrSZrgo}X z{ytdF+&O3X`{TtuOK4zAk-b`t@Zakz|9Zx`d-}MvXPLtFlCxvrTlJzW65SH!G6~+6 zfx=V_P*XIM7LTK<5oFICM=VI@uqDkyqbbptotAwn5Rlhg6tTQM0{_ ze;JqH-0BxJEq5}%@5aiM@srO!ND4;0bMnT*d&m***wwr*l#da2cFW?4n(Z&}Hz{LW zMCKH}qqCaP%H{vK;kvzUH=k>N-~~*)#qR?)TWr^EI<8f=m%mrxO%XHvAlzAHY6VGm z-I8BWVqy-B-df?fBp;K%?&Pd_*jQ{HSPxJs=EZz&#tee+ zB318KkLRx9Yo@;Q1IG6(sbYEe%U=QHrIA3YB;n^7peGTvPoWEWlc3(v%H-^hf1B}} zUz^|AcPVXR!V65}TqPIS6?+8u%~HLFEjIk=Vv7rKeUiU9_?h<%LNz3sHw!5ch0{uG zyr7VQ_FTf{)rFzQK6?WueK>n;@}XKxr){X;~ie{I8K=X54*{jszud-t8(fkjG*nx1a~x3?7(h-fvy z1;xPDoMDb6K}4L1lNqL#M3{tNrpAUzE#o2#HY1%-!kdCk2}g2T;Tv^ucmbhYpjVgg z0LUZdd{$sl1~Wm^I*mD}uH%F--Rh$u>8RaSUx6|wCG}IQa}kUiYrUipe{+)FduH2e z11l`~plOJ|DfsoLNrbmGc*)pG(d;YrsN_Vs#x_qK{-3B3ENDFGxFcs~ZX5_8R`m=B z-^DBuf&49Ruk$iL94I;EI*E-Z2%j9AeCJd+$aOmpK0 zk6}DBqEj!j9oGXqeru{HxFWy!k^RDD%KLB zjh%LiWkWbAvk;<;jl2s-#V5-U#f4gN&wdKsidH+(IYi!R!g2f-T>Z{NbrUG*JempY+mBH6F{{*B-n+M@*CEzuoxM zw>C)O?KLOaZI{l3=Vg6fgEhS(W~nz8-6yP}?}obFht$_bCPo#9&OrdP4;B%zBfT0- z%e|a-TZ?#Lr?)h>v&Uq7M(bRJ!{i*n_L_s6KwK9DaP9=ze}5?acFrz7tvS)!^CMGH z)Kjf>EJqj;m&xcU?j@hm>xL)5UGqr9Q#7P5K&zJKNW+GqGh5{t(>v~BOIl74nVZdQ71&1|y_B4_peG&!SQ z1r5MAJTW+)ozgwV znJCxi1LBvisI3ii!vS;A@BIZ2X2;h1KdvNwyhO9}fA(g@mfCDAvWn6=fo8T)X;YiT zE@)j!%ol~*5WAFME7B0mGoK!PGZ@}gJXXX!#=KSlLX*)AhmtWXx9;JQ(Qf?PBIaE` z1pW$LQ5a7Zub#+$fCkjq*iOJmDZ<_SGFCDhKsuZyZ{9$wd&t=;|fqqj%0s9ySzUA#c_; zo2af+aikI`vsmP<2qO?q8N>zs(ptDBJ14v&e-WgVZ%-y(U`6+e&H}12vvj%}$=yb< zduPXdt)rrhBu>AZlmh>ly{R3SCE3$Pf(Dfp6ScE4VkXC?%`4TV`ZsVSbn8#1?zG#*fis(j8oIf$}t)>)ct ze`De}+=S>1^eS^?i}P^mzSz;ZZ!k1Xc{&%kRpAmf#lb3q+;jOJp?4dO9r%MVqizG zh6C1#HNqA6tP{7PH2_cRi9`IdxNGV#f23b7HCxBEA1EQT4~GJrQuYG?V%)aeV>y`Q zk?C!*c6&1$a~Kc%jQtAQK-Pji;-@?5-oZcL?;K)+u-H%o}u@wH-9sgYA3I^_xUmAzn*isG_e{8;F z>aF}K)+;*729=mg8FbWJa>Sxuw%TgtBnr6QyiyvvN2CJ>&fc%q07O4Nf60$I{~>%i zqrx%G8M2yj0}?`2O)oPP8Ywg-wS#692=P@oNpz7f(9T$R13Qo zxa|17T~o)w$aR~<8=B&RuFDu-X0|!E$4s=%zVCuOXt{|Zz_W!`HK^*VM$fV3rmJHd z?SN8+Q9FXA0~#PQ2R7_SVcdpTCztQI$}2fT)3zpUN-367?e{kCo#IxcZ$N;|l$n3= z4}u=?AZ+ifG0=!958CEPv!YU~;+ilqvk4@tDYn-g8xP0Z&%*c0VOXmYQ8`I?^)ye=B@+SbDuU9}QLxvlz=(Tk`8#LBJ1Vdv66z)&M zm>OCx;WnYfG6yXh09Z%k-gXUQ>LP!jk!^A#A`kyyHdXMBzc6BSU{4+=C+|k}_(P?7 ztuEL?bsJ>dwZlcNm^je4Zl~+Y?RsOA91ISSW46+tKOaC6WvI) z{(6GE2TB8 zc#Bg1Pv9{(ufWP|WA`=tiSz(17U}fIh$7o)zp|fDQvuZ(F{dD_xUhgdsJF=oUDIxk zj%Hli#K#186ctUmU>RewjuQJarggYsuD@(gVqat!p`R0+rGVGnFB3bi`}ft|>4V;z z)XoH6kSP}}ii1pNT>9qE%hZ37wGuic4_||QPO{HO;(iqWW`=}~O1BRUoOPKOO9 zMEx0I@Pc{<+UQ0vZ05tptawW{pA0dzFOL<}29``e27gVk;pXRqLdWj|IIye0QCsp_ z^a!Mt%0=z+HYVC>8;MC)OeNaUg6TQw4gK3Gbw-j-&m0T_aS&*xpk{xRJ;#X29zQly z@p8TnlRatki<3G_{c`23E3$9 zE{!}_ppRm_MrkH;BH0x~(MB}OzsCv<8z8jH{2T9{7p!J{ZBjlh#Ue|e-f%6V4#-bJDyZvs=z#H5pm4I%SVXUF#kUAl z6E~4Q3YM_pJ%h;9$u{rAs&kfTYrMjF6wd9HQ{iDlka0SDjgEABcW^TV_~I4KEkN3e 
z;&1wMAwfpQK45={9z?s@BdMJzSoTn$Lqb+PdHK;LVh<0iG_(K#O6j<1lNY1q~038T}9V zkf>o^v96pi(r%a{NN4OGR|kzkmc=pse(5xc*>sSk6ukC&Cj%YhD_4%81z z9}bclv|fLEJ34=WcL^)jZFcB;A zw@JA;$ly$MyiPf*m%WQw#$xWi>KG50u<|rQ=v$z&N$SmvQSnFQwPFg;f@ltIC zDvLna1)L4UWdi+uwga@oV`{#3oJ53h_U}N+ zk%@kSC)@H?kbehylWz6qP$$0|(|tKU+&vA!leQ;{MnwSS1syp0<|q?c?+(X?_9BsR z`BRLV)@`|$;A%`EiNAa>`+}ZeYk_t z;E|Mq{mMxRd=?tlAS`&Rp2XBJ-k^RY>iOMiLxS7VdJ(pa1&SV)oKCZ8TsV*$B{z+_ zf7#(Rj+Il^56eQJ`6#_x0bCWQ(Vk+$?D|g?aPLz9!JIjJJb^UOZhhBp^42A=Fs1`; zch?=v<((uUaN5Ha8f-WxFvn$%23U)6D26kwKF|Sz-%BWLoFWc`A^jv7uxoC;sp)6; z78xuuq)BrQhy__z1hW3%_6jU_Y9NQ|fB%qNY^l!Dy|-8avLX(G|E`&&+|4#Gm>kEN z(!+bM_LOkRsQe0wUq^b@BdBtG%c=Mt!V^6&Y6-K>9s-3Pz1l-0KgH~Rdv6$1fc>F8r?Y*X?RaLBm;qG zP7l{=Wt{wH7{W>Sv`g`ZZfwL3e^mJ7!SKCTuwWdy(zb|2{T(vVi%a0Pk#$9z%Srxa zQ0-*hH|$24b1V99?9+<$$Y@p~4{ak6zgXyR=jEM$_FPAi>K4`&dtfyR@$A8D9o?0r z%fowiKAFguBB{Jw>KqR{1;EZP9x4l8jS8^RR6#fj%L8+VIR{SINiFB<0B9NmLf#EJ~+(QW%s3|ma*^0W5 zEkbw%Z7)NqG%6rZl-k%r7`4+TO?YPPj}7a(DDdWhb`wLJmC=2a_=i;wGhJ(#v|&G> zCdhyrOu~(03@Dr~6=id)f8Y_AtP(LUrEz_J;;!t{XNp%U<2UDz`MMLJc%Yv6|1lef z!>&ofs=1B~>?`sY%}(yQ3%55z+_h~TX?-XUs;7a4d>M4M6quDgl1|QiDoz~wpYkAU zklnmrK9sOnf6HJIgZF``m?ow95s_}R78TpN?{uF81d((Ax^(knf5ZyQB+HR(c@pkYXLEH^?STbvU3P=2boAtvC&Y13xV1E34UD>=qXz?MtFA!F1AoW3NW`xH(^5 z5`p}1#F+To&Se!iXufdxfXWe zj47Fh$xLkfHjgWsDAnBFSqBLcU;8_Ku+1VwJ=Bme(stvXqWXT40Jw!WiH;8YQ#;OS zFz~zVP>VwX&T}Pq;nW9&8k{7p=_2jwylixxTnG`Ts9}lhf5(kSiP8|NI7J(w8F>5d zLAPQF94X;z*C@#p?lCi3w?flNv_i2y6exv)!A?2_&J*1xQ>4Q^<6yhT1NV(aYSEp#!0rUXhAMg7E}k&;ZZczB`! z4mcZfpK&jcp7X5xJIz0IPU{%5F{Rx{H+UTWALR$xHKXBT`4H~XmWJao;os;#<9Jwy z)&7XP^Ub8IoJ8xC_i6P2i&i$M(U4}?+T(?35+gObf6aJMDb;bQd`9^IMF=Lg8@+^c zsaz0|ZxVFELnv)Ei9?qVjy{@MFIG5Uqj>AKGCEzClks|~G`dEZU0(;^>xdjGWESP} zurmo<#u;+7!39-+&6w!)s3+0<>_tKfgfr*6v^1*!8{KWQw*a6Oyc|V6X){mKt@`9v zwmJFSf0Sz7*1OVtvf#~=8mb_y?|u!OsFSaX9Om-rC*>pPc8A74=W*!uv3pS27-MxCxAhcw_oXr^xZ*^iNY73RhPCUG`42P z?_sOrPZ)cVYiarOWH&W-IE?AB%ADSXBXi6A*K*&gRCDxmuT1B0!V)hOizRK9bIcWR ze?p9-b8CKPJ#I6_vn$v9Q$W*LD*SqdY#C6axSSXPnIoM>}^(ArMIVf%E=jEnghE{+&Uc2zAnn>_7t=lB9mO$}# zaV3S+6w%VIwh1ThZbJkj`oPxWgYx~Ee>w55s!Z*Lm1oS1(h+n04gMuGDtQ5kKSg3c z3y5swY#6*#B!qLireXQ(=|YFxxa7uq%6{131R=fnAT=|ej*_A***FcXzTqfV=|J^f z=~Ipd%hc^QoG@uWL{ayewtta&?`)!re*3%U@9iN3_Ig}~Z7je^Q(Kqq#I*`pe@AGB zCLGp?Vv(`MqSke3*)OtStE2)8*34KzaOOU4H+cd($!Y3@=;_;Lx7V}jj>Tp zx8&G^GSlNn+D=>ndG=_e=Cv5)=IU)|t;N^x`m?c`H z#ZX)w#S@D6qq%J{B`_lf2`Yw29)|Y~0SOf`76cSgwqpH_zTbTXZR=`Lfsna@+zZ%~ z&=3_a;h&dO(zw9`=bGHBWSqBms$l5Usm0D#{D+4I#b~JH1PJ17q#pJIW;+4`2mmlM B_uK#g delta 4151 zcmV-75XkS7B9kJJYY6(2bhQlWX-JbK2O)pQGX-gr+UX3{P+;!f(9U7XsKF@ zgus%1a8aHi&MXl<1m!e~$1~;QKP6XC>H-O1TAMZO0F|mRiZV3C^pf@OXzv5}KBS?f zmt{8#Ot1xMU|NvNH?AU2Wm?DgFYBk+gM81Z#D9;4s}oU&eEdp9!T}}a&8YuSlZ}5> zcUW~A7*ore2*4g1=3|=oQ9hNhH&F0w+ba-WJGMwG>yorjz=*JshgB&*Edh+54b!>b z{;d2*6v=sJ5=nC%U%`Q%?3Yel;%i{Xl|WWd6T_Jo8^6DzpF@jveJkRg8I7i2$x(bQ zoPQMb!YzO{WsBx`FH7uKJc ze87Eia8%)V`~qKcsp-_p4-bAEKm6<4_m{2t0*H-83`FGPBz}JG+|#=&{1blzz5QR^ z5YOKs*&4EBme8TtiTX*>s&nW#OL{OsBIUn1kfqwDNV6M_PLq=D#L9Cd$RNo4eW1Fu!2!Dg`K}l91L$IfKy-M?I5=Q!MqwqAC-)g0l|xU#TlYzp`@nL!`7)}rS(E;HGp0?iH!&S%iqQP zhv#D>wCKMXStB-7QET1DX!q(hygWP%=N1!nRpBbglp5@ap)JA^4gP=^TID8am+h^u%n>R3dr}?4WCou6+{#>5}lYHIY&)g_q3>nCO0X z5MFrTxdPJnlNwLWUz*bj(vx6>Yc%gg3UBGW=W+)6MuHBH8BWyK`)U<&bM7wSw&>%T z=T48b;m=t(IE&(F3g*%TEwhjH=L=xOmula*0PV~y*^O9nA?kudL4@gy#fUrXCZ51_T!I$mr<@K2 zaw?a1$)yP`qQGg~iHok&8z*w#IWAJb*&eT@q1%)ptBuZ3p4#pF3XPKxA}r?BV$$(Q z6a9=TejQC-ZdiW~JgdHI&7_ATvO2tb3@vh4mRIxHLYi#Vws!>aS~V96+(WmIm? 
zttb=ou6utf^W5KTZY$T#7O`p=q5}eX%ZrQYUzuG^I{Xf8ubK>YG*Jodq$JZ4O*q0r zxWnCbb?*sARMu^g4whEc0hjZh8!H|5i*O#*kgSJ`xeReQk$3dTm>rSx7)k4|g9F;K zZ1}tU!?O`u1R_o{=-qx`Y;bP8b-Y3@6fJ_Aq+WkUnBVwbLOfxO%lJb8SFA%fE@2uP z)CtAe9?)!|eltEIEBcY<~44I3Wde*Z);*n-ZNAJ;^4C5P$?eg#}Yg7vmF^S;;+^%sGz z0jz6CA+AR&5pKLEDqC*+4fA zv(nL>f01_NX%Rmt%WOsIuiJ}-g&q(S8VclRKYL9eG_4{K4crNlFCBw!s){}xam>8` zCk%rA?tq7Q+{G$cB4DIS*13kWv5u~EvA!Kb+`g=FMoDRH}NG%QqCY#*AL(2nSalr((TX9}ucuA;)#8a8x>Zh@rZ!BorFNOI}4D{xadQZYd;I zg6B8gBIN~zh9I!XH%a)d^LafKm-JNmLHn@LA$H;AyR#OF935jUuBw0dEZjQD2;G~l z)rv_}M%{Fvuv0^RXlxElINsX=cQ_}{e?MorF3n&WnffRx=qz+QiG|ase^UnL^_$j{ z&hXOe{vB+U4b7NaXGwB_uL`H5#Z)@<2v$~i$`-7hBeo1GCap!xL#kBWCo7;|*Q5q@ z?T?G;3Nci5oJ7rM5}v&s9(QwzR=^O4=v@`^)^;v%oVgQWH5uw;btZ1wm2UyzMgJmtVm0 z4ICG~8{sCGCFHi*B_+x06se1P@OnvMy4V^p2DIn6S2Ve;wF|oIY!~7{R=vM*-r9-9 z6yso+n-e%E(^F_)`oe{r^(5l(P5^~%3?FSuK~>9yYknJ@Xw*|i?LjwB=2 zas`)tx~y}xg^)ZQ)xGSHzgSgt|9zIO#cOH((|a9rJ(Fbe|HBZF#o zc)N}n;uUo@9yf)%LH8;j1egR(!(7zU15}LrbSD z5fUly!oE1kCqcs;$Z_0K`bx;s_UvVvnsqd9utjw;tssn3sRZ9@lrxrKwmGO+-(w)FrL|+L1V;|f2{?X!InXvILTX< zjll~tvNI=GOv5oZ=*Y!iH+c5591^lbI%IY1{e;OB_HcRO9avq|+iJgOcKC5p2*?Ev3bFHf~rXqmVU^#8s)n`#z z4`Lrk)R64488)%w50kpuHAqu#P^&I_-Lq^rnSuHqxtz>?JS+xO{e1e4#{z9PuyfLu$rw9uyG2u}WeN*b^Wt+WChJ zzP02aa1Vkqf2J2g3QtySECKlP`Z$iIwbS%gfK5-g;i;ycGV_BukqB4ZPSqtt*4D-W zP}LpNLxXDeh!K}>>8V3|9#q#$hSegO_kBO>0yWVn_jYQn4uy6qUf(K9csUwkto*w# zBV3^%LzeA;F)Jntq?d~1tByNFqP`l1Asz|z#5Y+#e}O&ceDB%jP4HZ^(EPi_>|pbf z+)X=Y&e5lcp>zU+=yIiO@2@=c)trWLq|vN8*SR|gLrJ(NL^x&d3Ots_3c~Z|RQ0JR zRW0ahv`KVmnEJx;8|giRw3Brwv0UG$n9dC770P~rp<_>Vc~R7#lp(g^-}Qv1-j4`m zeXJhefAjox)-Zv@4Q3G)Y;*b4Z*ll}wE_HZ{G2Cc~A1`RgGcM^r28FO5a#QPtxIPq=>8yuc? zj})9mY*{jTgx@(!X2&dvG(yDucvUWEGNI;mf7j471npIt@=cO}^8;&i;%ls6Er_Qv zlKenz2wgggdIG}eDZV(Fx$is02m{i;4I7Qj*VAw}U;lGg`%VbKP54}xx4rF8wrG2* zTRU(V&+uK~2kMdYpm-z(nmH;nbeBynem%@Y;F9ZN3ga-Miz&O;isswvNyEiHP5!_k ze>uQ`3=5zhg9~$}4nbRPw%-SWIk9m_H99V3GEhKDNqWl;D%&l|Xj@fm)<19+q*~x}^~|Ei4Dmv9j)wS_JF#)%ng+WR~!G|1?H6=&5Rf4Zwj zYtD>LLs|GXTvhU)y>!y`!>>^@NmQ z*(3iK@r?+DO~mKc$TLv0CtDBt=OOY6g`0P`3fsX<6}OO19lGct`FC)*zWb0cWs&sV zi%{3K=KXXZePFXOB`_lf2`Yw29)|Y~0SOf`76cShNJKhTV9)PCJM z&=3_alQc~hs+Ya6B-1p_O01T}s3bgP6L8~7nx7#9l+X8v1PE+_-KIbIIkEx*2ml() B_iX?G diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index 33013273122..c4722fe5876 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse FF:4C:84:A6:37:28:EC:7E:A7:D8:C6:49:0D:C6:F9:5D:C1:06:BA:6D:69:49:0A:AA:38:32:01:2B:ED:D9:F2:FA +lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile index e3d935b4c98..0663b3cba2b 100644 --- a/validator_client/slashing_protection/Makefile +++ b/validator_client/slashing_protection/Makefile @@ -6,20 +6,23 @@ ARCHIVE_URL := https://github.com/eth-clients/slashing-protection-interchange-te ifeq ($(OS),Windows_NT) ifeq (, $(shell where rm)) - rmfile = if exist $(1) (del /F /Q $(1)) - rmdir = if exist $(1) (rmdir /Q /S $(1)) + rmfile = if exist $(1) (del /F /Q $(1)) + rmdir = if exist $(1) (rmdir /Q /S $(1)) + makedir = if not exist $(1) (mkdir $(1)) else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p $(1) endif else - rmfile = rm -f $(1) - rmdir = rm -rf $(1) + rmfile = rm -f $(1) + rmdir = rm -rf $(1) + makedir = mkdir -p 
$(1) endif $(OUTPUT_DIR): $(TARBALL) $(call rmdir,$@) - mkdir $@ + $(call makedir,$@) tar --strip-components=1 -xzf $^ -C $@ $(TARBALL): diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs index ee5bb114712..ee8f522cd6a 100644 --- a/validator_client/slashing_protection/tests/interop.rs +++ b/validator_client/slashing_protection/tests/interop.rs @@ -25,8 +25,10 @@ fn test_root_dir() -> PathBuf { .join("tests") } +// NOTE: I've combined two tests together to avoid a race-condition which occurs when fighting over +// which test builds the TEST_ROOT_DIR lazy static. #[test] -fn generated() { +fn generated_and_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() @@ -37,10 +39,7 @@ fn generated() { let test_case: MultiTestCase = serde_json::from_reader(&file).unwrap(); test_case.run(false); } -} -#[test] -fn generated_with_minification() { for entry in TEST_ROOT_DIR .join("generated") .read_dir() From 76054947914e4ed9b0c5760c6a527e0083387b80 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 3 Oct 2023 17:59:28 +0000 Subject: [PATCH 04/15] Use only lighthouse types in the mock builder (#4793) ## Proposed Changes - only use LH types to avoid build issues - use warp instead of axum for the server to avoid importing the dep ## Additional Info - wondering if we can move the `execution_layer/test_utils` to its own crate and import it as a dev dependency - this would be made easier by separating out our engine API types into their own crate so we can use them in the test crate - or maybe we can look into using reth types for the engine api if they are in their own crate Co-authored-by: realbigsean --- Cargo.lock | 206 +---- Cargo.toml | 4 +- beacon_node/beacon_chain/src/test_utils.rs | 14 +- beacon_node/execution_layer/Cargo.toml | 5 - .../src/test_utils/mock_builder.rs | 729 +++++++++--------- .../execution_layer/src/test_utils/mod.rs | 2 +- beacon_node/http_api/tests/tests.rs | 6 +- consensus/types/src/builder_bid.rs | 2 +- .../types/src/validator_registration_data.rs | 14 + validator_client/Cargo.toml | 2 +- .../src/http_api/tests/keystores.rs | 4 +- 11 files changed, 413 insertions(+), 575 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a8fbdd74ad..df817dc2035 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,15 +195,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "anvil-rpc" -version = "0.1.0" -source = "git+https://github.com/foundry-rs/foundry?rev=b45456717ffae1af65acdc71099f8cb95e6683a0#b45456717ffae1af65acdc71099f8cb95e6683a0" -dependencies = [ - "serde", - "serde_json", -] - [[package]] name = "anyhow" version = "1.0.75" @@ -311,28 +302,6 @@ dependencies = [ "event-listener", ] -[[package]] -name = "async-stream" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.37", -] - [[package]] name = "async-trait" version = "0.1.73" @@ -509,24 +478,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = 
"beacon-api-client" -version = "0.1.0" -source = "git+https://github.com/ralexstokes/beacon-api-client?rev=93d7e8c#93d7e8c38fe9782c4862909663e7b57c44f805a9" -dependencies = [ - "ethereum-consensus", - "http", - "itertools", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "beacon_chain" version = "0.2.0" @@ -1749,7 +1700,7 @@ dependencies = [ "aes-gcm", "arrayvec", "delay_map", - "enr 0.9.1", + "enr", "fnv", "futures", "hashlink 0.7.0", @@ -1895,7 +1846,6 @@ dependencies = [ "ff 0.12.1", "generic-array", "group 0.12.1", - "pkcs8 0.9.0", "rand_core 0.6.4", "sec1 0.3.0", "subtle", @@ -1931,25 +1881,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enr" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fa0a0be8915790626d5759eb51fe47435a8eac92c2f212bd2da9aa7f30ea56" -dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", - "bytes", - "hex", - "k256 0.11.6", - "log", - "rand", - "rlp", - "serde", - "sha3 0.10.8", - "zeroize", -] - [[package]] name = "enr" version = "0.9.1" @@ -2304,30 +2235,6 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "ethereum-consensus" -version = "0.1.1" -source = "git+https://github.com/ralexstokes/ethereum-consensus?rev=e380108#e380108d15fcc40349927fdf3d11c71f9edb67c2" -dependencies = [ - "async-stream", - "blst", - "bs58 0.4.0", - "enr 0.6.2", - "hex", - "integer-sqrt", - "multiaddr 0.14.0", - "multihash 0.16.3", - "rand", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.9.9", - "ssz_rs", - "thiserror", - "tokio", - "tokio-stream", -] - [[package]] name = "ethereum-types" version = "0.12.1" @@ -2565,12 +2472,10 @@ version = "0.1.0" dependencies = [ "arc-swap", "async-trait", - "axum", "builder_client", "bytes", "environment", "eth2", - "ethereum-consensus", "ethereum_serde_utils", "ethereum_ssz", "ethers-core", @@ -2580,13 +2485,11 @@ dependencies = [ "hash-db", "hash256-std-hasher", "hex", - "hyper", "jsonwebtoken", "keccak-hash", "lazy_static", "lighthouse_metrics", "lru 0.7.8", - "mev-rs", "parking_lot 0.12.1", "pretty_reqwest_error", "rand", @@ -2596,7 +2499,6 @@ dependencies = [ "serde_json", "slog", "slot_clock", - "ssz_rs", "ssz_types", "state_processing", "strum", @@ -4007,7 +3909,7 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "multiaddr 0.18.0", + "multiaddr", "pin-project", ] @@ -4048,8 +3950,8 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -4143,7 +4045,7 @@ dependencies = [ "ed25519-dalek", "libsecp256k1", "log", - "multihash 0.19.1", + "multihash", "p256", "quick-protobuf", "rand", @@ -4222,8 +4124,8 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "multiaddr 0.18.0", - "multihash 0.19.1", + "multiaddr", + "multihash", "once_cell", "quick-protobuf", "rand", @@ -4805,27 +4707,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "mev-rs" -version = "0.3.0" -source = "git+https://github.com/ralexstokes/mev-rs?rev=216657016d5c0889b505857c89ae42c7aa2764af#216657016d5c0889b505857c89ae42c7aa2764af" -dependencies = [ - "anvil-rpc", - "async-trait", - "axum", - "beacon-api-client", - "ethereum-consensus", - "hyper", - "parking_lot 0.12.1", - "reqwest", - "serde", - "serde_json", - "ssz_rs", - "thiserror", - "tokio", - "tracing", -] - [[package]] name = "migrations_internals" version = "2.1.0" @@ -4933,24 +4814,6 @@ version = 
"0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" -[[package]] -name = "multiaddr" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c580bfdd8803cce319b047d239559a22f809094aaea4ac13902a1fdcfcd4261" -dependencies = [ - "arrayref", - "bs58 0.4.0", - "byteorder", - "data-encoding", - "multihash 0.16.3", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.2", - "url", -] - [[package]] name = "multiaddr" version = "0.18.0" @@ -4962,7 +4825,7 @@ dependencies = [ "data-encoding", "libp2p-identity", "multibase", - "multihash 0.19.1", + "multihash", "percent-encoding", "serde", "static_assertions", @@ -4981,19 +4844,6 @@ dependencies = [ "data-encoding-macro", ] -[[package]] -name = "multihash" -version = "0.16.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" -dependencies = [ - "core2", - "digest 0.10.7", - "multihash-derive", - "sha2 0.10.7", - "unsigned-varint 0.7.2", -] - [[package]] name = "multihash" version = "0.19.1" @@ -5004,20 +4854,6 @@ dependencies = [ "unsigned-varint 0.7.2", ] -[[package]] -name = "multihash-derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure", -] - [[package]] name = "multistream-select" version = "0.13.0" @@ -7349,31 +7185,6 @@ dependencies = [ "der 0.7.8", ] -[[package]] -name = "ssz_rs" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057291e5631f280978fa9c8009390663ca4613359fc1318e36a8c24c392f6d1f" -dependencies = [ - "bitvec 1.0.1", - "hex", - "num-bigint", - "serde", - "sha2 0.9.9", - "ssz_rs_derive", -] - -[[package]] -name = "ssz_rs_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "ssz_types" version = "0.5.4" @@ -7881,7 +7692,6 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2 0.5.4", diff --git a/Cargo.toml b/Cargo.toml index 62c0e7bd20a..901ceea68be 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -136,7 +136,7 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } ring = "0.16" rusqlite = { version = "0.28", features = ["bundled"] } serde = { version = "1", features = ["derive"] } @@ -156,7 +156,7 @@ superstruct = "0.6" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync"] } +tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } tree_hash = "0.5" diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 5e54b1194d4..5de697764e6 
100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -17,8 +17,8 @@ use bls::get_withdrawal_credentials; use execution_layer::{ auth::JwtKey, test_utils::{ - ExecutionBlockGenerator, MockBuilder, MockBuilderServer, MockExecutionLayer, - DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK, + ExecutionBlockGenerator, MockBuilder, MockExecutionLayer, DEFAULT_JWT_SECRET, + DEFAULT_TERMINAL_BLOCK, }, ExecutionLayer, }; @@ -595,7 +595,10 @@ where .execution_block_generator() } - pub fn set_mock_builder(&mut self, beacon_url: SensitiveUrl) -> MockBuilderServer { + pub fn set_mock_builder( + &mut self, + beacon_url: SensitiveUrl, + ) -> impl futures::Future { let mock_el = self .mock_execution_layer .as_ref() @@ -604,7 +607,7 @@ where let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap(); // Create the builder, listening on a free port. - let (mock_builder, mock_builder_server) = MockBuilder::new_for_testing( + let (mock_builder, (addr, mock_builder_server)) = MockBuilder::new_for_testing( mock_el_url, beacon_url, self.spec.clone(), @@ -612,8 +615,7 @@ where ); // Set the builder URL in the execution layer now that its port is known. - let builder_listen_addr = mock_builder_server.local_addr(); - let port = builder_listen_addr.port(); + let port = addr.port(); mock_el .el .set_builder_url( diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3651e371883..257ff945f89 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -41,11 +41,6 @@ lazy_static = { workspace = true } ethers-core = { workspace = true } builder_client = { path = "../builder_client" } fork_choice = { workspace = true } -mev-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "216657016d5c0889b505857c89ae42c7aa2764af" } -axum = "0.6" -hyper = "0.14" -ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e380108" } -ssz_rs = "0.9.0" tokio-stream = { workspace = true } strum = { workspace = true } keccak-hash = "0.10.0" diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index c9dd196fabc..2d3cc27eb1d 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,49 +1,28 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes}; -use async_trait::async_trait; use eth2::types::{BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts}; -pub use ethereum_consensus::state_transition::Context; -use ethereum_consensus::{ - crypto::{SecretKey, Signature}, - primitives::{BlsPublicKey, BlsSignature, ExecutionAddress, Hash32, Root, U256}, - state_transition::Error, -}; use fork_choice::ForkchoiceUpdateParameters; -use mev_rs::{ - blinded_block_provider::Server as BlindedBlockProviderServer, - signing::{sign_builder_message, verify_signed_builder_message}, - types::{ - bellatrix::{ - BuilderBid as BuilderBidBellatrix, SignedBuilderBid as SignedBuilderBidBellatrix, - }, - capella::{BuilderBid as BuilderBidCapella, SignedBuilderBid as SignedBuilderBidCapella}, - BidRequest, BuilderBid, ExecutionPayload as ServerPayload, SignedBlindedBeaconBlock, - SignedBuilderBid, SignedValidatorRegistration, - }, - Error as MevError, -}; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; -use 
ssz::{Decode, Encode}; -use ssz_rs::{Merkleized, SimpleSerialize}; use std::collections::HashMap; use std::fmt::Debug; -use std::net::Ipv4Addr; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tempfile::NamedTempFile; use tree_hash::TreeHash; +use types::builder_bid::{BuilderBid, SignedBuilderBid}; +use types::payload::BlindedPayloadRefMut; use types::{ - Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, Hash256, Slot, - Uint256, + Address, BeaconState, BlindedPayload, ChainSpec, EthSpec, ExecPayload, ForkName, + ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, + SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; - -pub type MockBuilderServer = axum::Server< - hyper::server::conn::AddrIncoming, - axum::routing::IntoMakeService, ->; +use types::{ExecutionBlockHash, SecretKey}; +use warp::{Filter, Rejection}; #[derive(Clone)] pub enum Operation { @@ -58,119 +37,128 @@ pub enum Operation { } impl Operation { - fn apply(self, bid: &mut B) -> Result<(), MevError> { + fn apply>(self, bid: &mut B) { match self { - Operation::FeeRecipient(fee_recipient) => { - *bid.fee_recipient_mut() = to_ssz_rs(&fee_recipient)? - } - Operation::GasLimit(gas_limit) => *bid.gas_limit_mut() = gas_limit as u64, - Operation::Value(value) => *bid.value_mut() = to_ssz_rs(&value)?, - Operation::ParentHash(parent_hash) => *bid.parent_hash_mut() = to_ssz_rs(&parent_hash)?, - Operation::PrevRandao(prev_randao) => *bid.prev_randao_mut() = to_ssz_rs(&prev_randao)?, - Operation::BlockNumber(block_number) => *bid.block_number_mut() = block_number as u64, - Operation::Timestamp(timestamp) => *bid.timestamp_mut() = timestamp as u64, - Operation::WithdrawalsRoot(root) => *bid.withdrawals_root_mut()? = to_ssz_rs(&root)?, + Operation::FeeRecipient(fee_recipient) => bid.set_fee_recipient(fee_recipient), + Operation::GasLimit(gas_limit) => bid.set_gas_limit(gas_limit as u64), + Operation::Value(value) => bid.set_value(value), + Operation::ParentHash(parent_hash) => bid.set_parent_hash(parent_hash), + Operation::PrevRandao(prev_randao) => bid.set_prev_randao(prev_randao), + Operation::BlockNumber(block_number) => bid.set_block_number(block_number as u64), + Operation::Timestamp(timestamp) => bid.set_timestamp(timestamp as u64), + Operation::WithdrawalsRoot(root) => bid.set_withdrawals_root(root), } - Ok(()) } } +#[derive(Debug)] +struct Custom(String); + +impl warp::reject::Reject for Custom {} + // contains functions we need for BuilderBids.. 
not sure what to call this -pub trait BidStuff { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress; - fn gas_limit_mut(&mut self) -> &mut u64; - fn value_mut(&mut self) -> &mut U256; - fn parent_hash_mut(&mut self) -> &mut Hash32; - fn prev_randao_mut(&mut self) -> &mut Hash32; - fn block_number_mut(&mut self) -> &mut u64; - fn timestamp_mut(&mut self) -> &mut u64; - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError>; - - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result; - - fn to_signed_bid(self, signature: BlsSignature) -> SignedBuilderBid; +pub trait BidStuff { + fn set_fee_recipient(&mut self, fee_recipient_address: Address); + fn set_gas_limit(&mut self, gas_limit: u64); + fn set_value(&mut self, value: Uint256); + fn set_parent_hash(&mut self, parent_hash: Hash256); + fn set_prev_randao(&mut self, randao: Hash256); + fn set_block_number(&mut self, block_number: u64); + fn set_timestamp(&mut self, timestamp: u64); + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); + + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; + + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid>; } -impl BidStuff for BuilderBid { - fn fee_recipient_mut(&mut self) -> &mut ExecutionAddress { - match self { - Self::Bellatrix(bid) => &mut bid.header.fee_recipient, - Self::Capella(bid) => &mut bid.header.fee_recipient, +impl BidStuff for BuilderBid> { + fn set_fee_recipient(&mut self, fee_recipient: Address) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.fee_recipient = fee_recipient; + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.fee_recipient = fee_recipient; + } } } - - fn gas_limit_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.gas_limit, - Self::Capella(bid) => &mut bid.header.gas_limit, + fn set_gas_limit(&mut self, gas_limit: u64) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.gas_limit = gas_limit; + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.gas_limit = gas_limit; + } } } - - fn value_mut(&mut self) -> &mut U256 { - match self { - Self::Bellatrix(bid) => &mut bid.value, - Self::Capella(bid) => &mut bid.value, - } + fn set_value(&mut self, value: Uint256) { + self.value = value; } - - fn parent_hash_mut(&mut self) -> &mut Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.parent_hash, - Self::Capella(bid) => &mut bid.header.parent_hash, + fn set_parent_hash(&mut self, parent_hash: Hash256) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.parent_hash = + ExecutionBlockHash::from_root(parent_hash); + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.parent_hash = + ExecutionBlockHash::from_root(parent_hash); + } } } - - fn prev_randao_mut(&mut self) -> &mut Hash32 { - match self { - Self::Bellatrix(bid) => &mut bid.header.prev_randao, - Self::Capella(bid) => &mut bid.header.prev_randao, + fn set_prev_randao(&mut self, prev_randao: Hash256) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.prev_randao = prev_randao; + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.prev_randao = prev_randao; + } } } - - fn block_number_mut(&mut 
self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.block_number, - Self::Capella(bid) => &mut bid.header.block_number, + fn set_block_number(&mut self, block_number: u64) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.block_number = block_number; + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.block_number = block_number; + } } } - - fn timestamp_mut(&mut self) -> &mut u64 { - match self { - Self::Bellatrix(bid) => &mut bid.header.timestamp, - Self::Capella(bid) => &mut bid.header.timestamp, + fn set_timestamp(&mut self, timestamp: u64) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(payload) => { + payload.execution_payload_header.timestamp = timestamp; + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.timestamp = timestamp; + } } } - - fn withdrawals_root_mut(&mut self) -> Result<&mut Root, MevError> { - match self { - Self::Bellatrix(_) => Err(MevError::InvalidFork), - Self::Capella(bid) => Ok(&mut bid.header.withdrawals_root), + fn set_withdrawals_root(&mut self, withdrawals_root: Hash256) { + match self.header.to_mut() { + BlindedPayloadRefMut::Merge(_) => { + panic!("no withdrawals before capella") + } + BlindedPayloadRefMut::Capella(payload) => { + payload.execution_payload_header.withdrawals_root = withdrawals_root; + } } } - fn sign_builder_message( - &mut self, - signing_key: &SecretKey, - context: &Context, - ) -> Result { - match self { - Self::Bellatrix(message) => sign_builder_message(message, signing_key, context), - Self::Capella(message) => sign_builder_message(message, signing_key, context), - } + fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature { + let domain = spec.get_builder_domain(); + let message = self.signing_root(domain); + sk.sign(message) } - fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid { - match self { - Self::Bellatrix(message) => { - SignedBuilderBid::Bellatrix(SignedBuilderBidBellatrix { message, signature }) - } - Self::Capella(message) => { - SignedBuilderBid::Capella(SignedBuilderBidCapella { message, signature }) - } + fn to_signed_bid(self, signature: Signature) -> SignedBuilderBid> { + SignedBuilderBid { + message: self, + signature, } } } @@ -180,8 +168,7 @@ pub struct MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Arc, - val_registration_cache: Arc>>, + val_registration_cache: Arc>>, builder_sk: SecretKey, operations: Arc>>, invalidate_signatures: Arc>, @@ -193,7 +180,7 @@ impl MockBuilder { beacon_url: SensitiveUrl, spec: ChainSpec, executor: TaskExecutor, - ) -> (Self, MockBuilderServer) { + ) -> (Self, (SocketAddr, impl Future)) { let file = NamedTempFile::new().unwrap(); let path = file.path().into(); std::fs::write(&path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -209,23 +196,14 @@ impl MockBuilder { let el = ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap(); - // This should probably be done for all fields, we only update ones we are testing with so far. 
- let mut context = Context::for_mainnet(); - context.terminal_total_difficulty = to_ssz_rs(&spec.terminal_total_difficulty).unwrap(); - context.terminal_block_hash = to_ssz_rs(&spec.terminal_block_hash).unwrap(); - context.terminal_block_hash_activation_epoch = - to_ssz_rs(&spec.terminal_block_hash_activation_epoch).unwrap(); - let builder = MockBuilder::new( el, BeaconNodeHttpClient::new(beacon_url, Timeouts::set_all(Duration::from_secs(1))), spec, - context, ); let host: Ipv4Addr = Ipv4Addr::LOCALHOST; let port = 0; - let provider = BlindedBlockProviderServer::new(host, port, builder.clone()); - let server = provider.serve(); + let server = serve(host, port, builder.clone()).expect("mock builder server should start"); (builder, server) } @@ -233,15 +211,13 @@ impl MockBuilder { el: ExecutionLayer, beacon_client: BeaconNodeHttpClient, spec: ChainSpec, - context: Context, ) -> Self { - let sk = SecretKey::random(&mut rand::thread_rng()).unwrap(); + let sk = SecretKey::random(); Self { el, beacon_client, // Should keep spec and context consistent somehow spec, - context: Arc::new(context), val_registration_cache: Arc::new(RwLock::new(HashMap::new())), builder_sk: sk, operations: Arc::new(RwLock::new(vec![])), @@ -263,237 +239,282 @@ impl MockBuilder { *self.invalidate_signatures.write() = false; } - fn apply_operations(&self, bid: &mut B) -> Result<(), MevError> { + fn apply_operations>(&self, bid: &mut B) { let mut guard = self.operations.write(); while let Some(op) = guard.pop() { - op.apply(bid)?; + op.apply(bid); } - Ok(()) } } -#[async_trait] -impl mev_rs::BlindedBlockProvider for MockBuilder { - async fn register_validators( - &self, - registrations: &mut [SignedValidatorRegistration], - ) -> Result<(), MevError> { - for registration in registrations { - let pubkey = registration.message.public_key.clone(); - let message = &mut registration.message; - verify_signed_builder_message( - message, - ®istration.signature, - &pubkey, - &self.context, - )?; - self.val_registration_cache.write().insert( - registration.message.public_key.clone(), - registration.clone(), - ); - } - - Ok(()) - } - - async fn fetch_best_bid(&self, bid_request: &BidRequest) -> Result { - let slot = Slot::new(bid_request.slot); - let fork = self.spec.fork_name_at_slot::(slot); - let signed_cached_data = self - .val_registration_cache - .read() - .get(&bid_request.public_key) - .ok_or_else(|| convert_err("missing registration"))? - .clone(); - let cached_data = signed_cached_data.message; - - let head = self - .beacon_client - .get_beacon_blocks::(BlockId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing head block"))?; - - let block = head.data.message(); - let head_block_root = block.tree_hash_root(); - let head_execution_hash = block - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); - if head_execution_hash != from_ssz_rs(&bid_request.parent_hash)? { - return Err(custom_err(format!( - "head mismatch: {} {}", - head_execution_hash, bid_request.parent_hash - ))); - } - - let finalized_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Finalized) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? - .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); - - let justified_execution_hash = self - .beacon_client - .get_beacon_blocks::(BlockId::Justified) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing finalized block"))? 
- .data - .message() - .body() - .execution_payload() - .map_err(convert_err)? - .block_hash(); - - let val_index = self - .beacon_client - .get_beacon_states_validator_id( - StateId::Head, - &ValidatorId::PublicKey(from_ssz_rs(&cached_data.public_key)?), - ) - .await - .map_err(convert_err)? - .ok_or_else(|| convert_err("missing validator from state"))? - .data - .index; - let fee_recipient = from_ssz_rs(&cached_data.fee_recipient)?; - let slots_since_genesis = slot.as_u64() - self.spec.genesis_slot.as_u64(); - - let genesis_time = self - .beacon_client - .get_beacon_genesis() - .await - .map_err(convert_err)? - .data - .genesis_time; - let timestamp = (slots_since_genesis * self.spec.seconds_per_slot) + genesis_time; - - let head_state: BeaconState = self - .beacon_client - .get_debug_beacon_states(StateId::Head) - .await - .map_err(convert_err)? - .ok_or_else(|| custom_err("missing head state".to_string()))? - .data; - let prev_randao = head_state - .get_randao_mix(head_state.current_epoch()) - .map_err(convert_err)?; - - let payload_attributes = match fork { - ForkName::Merge => PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None), - // the withdrawals root is filled in by operations - ForkName::Capella => { - PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) - } - ForkName::Base | ForkName::Altair => { - return Err(MevError::InvalidFork); - } - }; - - self.el - .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) - .await; - - let forkchoice_update_params = ForkchoiceUpdateParameters { - head_root: Hash256::zero(), - head_hash: None, - justified_hash: Some(justified_execution_hash), - finalized_hash: Some(finalized_execution_hash), - }; - - let payload = self - .el - .get_full_payload_caching::>( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) - .await - .map_err(convert_err)? - .to_payload() - .to_execution_payload_header(); - - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - let mut message = match fork { - ForkName::Capella => BuilderBid::Capella(BuilderBidCapella { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Merge => BuilderBid::Bellatrix(BuilderBidBellatrix { - header: serde_json::from_str(json_payload.as_str()).map_err(convert_err)?, - value: to_ssz_rs(&Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI))?, - public_key: self.builder_sk.public_key(), - }), - ForkName::Base | ForkName::Altair => return Err(MevError::InvalidFork), - }; - *message.gas_limit_mut() = cached_data.gas_limit; - - self.apply_operations(&mut message)?; - let mut signature = - message.sign_builder_message(&self.builder_sk, self.context.as_ref())?; - - if *self.invalidate_signatures.read() { - signature = Signature::default(); - } - - Ok(message.to_signed_bid(signature)) - } - - async fn open_bid( - &self, - signed_block: &mut SignedBlindedBeaconBlock, - ) -> Result { - let node = match signed_block { - SignedBlindedBeaconBlock::Bellatrix(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - SignedBlindedBeaconBlock::Capella(block) => { - block.message.body.execution_payload_header.hash_tree_root() - } - } - .map_err(convert_err)?; - - let payload = self - .el - .get_payload_by_root(&from_ssz_rs(&node)?) 
- .ok_or_else(|| convert_err("missing payload for tx root"))?; +pub fn serve( + listen_addr: Ipv4Addr, + listen_port: u16, + builder: MockBuilder, +) -> Result<(SocketAddr, impl Future), crate::test_utils::Error> { + let inner_ctx = builder.clone(); + let ctx_filter = warp::any().map(move || inner_ctx.clone()); + + let prefix = warp::path("eth") + .and(warp::path("v1")) + .and(warp::path("builder")); + + let validators = prefix + .and(warp::path("validators")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |registrations: Vec, builder: MockBuilder| async move { + for registration in registrations { + if !registration.verify_signature(&builder.spec) { + return Err(reject("invalid signature")); + } + builder + .val_registration_cache + .write() + .insert(registration.message.pubkey, registration); + } + Ok(warp::reply()) + }, + ); - let json_payload = serde_json::to_string(&payload).map_err(convert_err)?; - serde_json::from_str(json_payload.as_str()).map_err(convert_err) - } -} + let blinded_block = prefix + .and(warp::path("blinded_blocks")) + .and(warp::body::json()) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |block: SignedBlindedBeaconBlock, builder: MockBuilder| async move { + let slot = block.slot(); + let root = match block { + SignedBlindedBeaconBlock::Base(_) | types::SignedBeaconBlock::Altair(_) => { + return Err(reject("invalid fork")); + } + SignedBlindedBeaconBlock::Merge(block) => { + block.message.body.execution_payload.tree_hash_root() + } + SignedBlindedBeaconBlock::Capella(block) => { + block.message.body.execution_payload.tree_hash_root() + } + }; + + let fork_name = builder.spec.fork_name_at_slot::(slot); + let payload = builder + .el + .get_payload_by_root(&root) + .ok_or_else(|| reject("missing payload for tx root"))?; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: payload, + }; + + let json_payload = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize response"))?; + Ok::<_, warp::reject::Rejection>( + warp::http::Response::builder() + .status(200) + .body( + serde_json::to_string(&json_payload) + .map_err(|_| reject("nvalid JSON"))?, + ) + .unwrap(), + ) + }, + ); -pub fn from_ssz_rs(ssz_rs_data: &T) -> Result { - U::from_ssz_bytes( - ssz_rs::serialize(ssz_rs_data) - .map_err(convert_err)? - .as_ref(), - ) - .map_err(convert_err) -} + let status = prefix + .and(warp::path("status")) + .then(|| async { warp::reply() }); + + let header = prefix + .and(warp::path("header")) + .and(warp::path::param::().or_else(|_| async { Err(reject("Invalid slot")) })) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid parent hash")) }), + ) + .and( + warp::path::param::() + .or_else(|_| async { Err(reject("Invalid pubkey")) }), + ) + .and(warp::path::end()) + .and(ctx_filter.clone()) + .and_then( + |slot: Slot, + parent_hash: ExecutionBlockHash, + pubkey: PublicKeyBytes, + builder: MockBuilder| async move { + let fork = builder.spec.fork_name_at_slot::(slot); + let signed_cached_data = builder + .val_registration_cache + .read() + .get(&pubkey) + .ok_or_else(|| reject("missing registration"))? + .clone(); + let cached_data = signed_cached_data.message; + + let head = builder + .beacon_client + .get_beacon_blocks::(BlockId::Head) + .await + .map_err(|_| reject("couldn't get head"))? 
+ .ok_or_else(|| reject("missing head block"))?; + + let block = head.data.message(); + let head_block_root = block.tree_hash_root(); + let head_execution_hash = block + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + if head_execution_hash != parent_hash { + return Err(reject("head mismatch")); + } + + let finalized_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Finalized) + .await + .map_err(|_| reject("couldn't get finalized block"))? + .ok_or_else(|| reject("missing finalized block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + + let justified_execution_hash = builder + .beacon_client + .get_beacon_blocks::(BlockId::Justified) + .await + .map_err(|_| reject("couldn't get justified block"))? + .ok_or_else(|| reject("missing justified block"))? + .data + .message() + .body() + .execution_payload() + .map_err(|_| reject("pre-merge block"))? + .block_hash(); + + let val_index = builder + .beacon_client + .get_beacon_states_validator_id(StateId::Head, &ValidatorId::PublicKey(pubkey)) + .await + .map_err(|_| reject("couldn't get validator"))? + .ok_or_else(|| reject("missing validator"))? + .data + .index; + let fee_recipient = cached_data.fee_recipient; + let slots_since_genesis = slot.as_u64() - builder.spec.genesis_slot.as_u64(); + + let genesis_data = builder + .beacon_client + .get_beacon_genesis() + .await + .map_err(|_| reject("couldn't get beacon genesis"))? + .data; + let genesis_time = genesis_data.genesis_time; + let timestamp = + (slots_since_genesis * builder.spec.seconds_per_slot) + genesis_time; + + let head_state: BeaconState = builder + .beacon_client + .get_debug_beacon_states(StateId::Head) + .await + .map_err(|_| reject("couldn't get state"))? + .ok_or_else(|| reject("missing state"))? + .data; + let prev_randao = head_state + .get_randao_mix(head_state.current_epoch()) + .map_err(|_| reject("couldn't get prev randao"))?; + + let payload_attributes = match fork { + ForkName::Merge => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, None) + } + // the withdrawals root is filled in by operations + ForkName::Capella => { + PayloadAttributes::new(timestamp, *prev_randao, fee_recipient, Some(vec![])) + } + ForkName::Base | ForkName::Altair => { + return Err(reject("invalid fork")); + } + }; + + builder + .el + .insert_proposer(slot, head_block_root, val_index, payload_attributes.clone()) + .await; + + let forkchoice_update_params = ForkchoiceUpdateParameters { + head_root: Hash256::zero(), + head_hash: None, + justified_hash: Some(justified_execution_hash), + finalized_hash: Some(finalized_execution_hash), + }; + + let payload = builder + .el + .get_full_payload_caching::>( + head_execution_hash, + &payload_attributes, + forkchoice_update_params, + fork, + ) + .await + .map_err(|_| reject("couldn't get payload"))? 
+ .to_payload() + .to_execution_payload_header(); + + let mut message = BuilderBid { + header: BlindedPayload::from(payload), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + _phantom_data: std::marker::PhantomData, + }; + message.set_gas_limit(cached_data.gas_limit); + + builder.apply_operations(&mut message); + + let mut signature = + message.sign_builder_message(&builder.builder_sk, &builder.spec); + + if *builder.invalidate_signatures.read() { + signature = Signature::empty(); + } + + let fork_name = builder + .spec + .fork_name_at_epoch(slot.epoch(E::slots_per_epoch())); + let signed_bid = SignedBuilderBid { message, signature }; + let resp = ForkVersionedResponse { + version: Some(fork_name), + data: signed_bid, + }; + let json_bid = serde_json::to_string(&resp) + .map_err(|_| reject("coudn't serialize signed bid"))?; + Ok::<_, Rejection>( + warp::http::Response::builder() + .status(200) + .body(json_bid) + .unwrap(), + ) + }, + ); -pub fn to_ssz_rs(ssz_data: &T) -> Result { - ssz_rs::deserialize::(&ssz_data.as_ssz_bytes()).map_err(convert_err) -} + let routes = warp::post() + .and(validators.or(blinded_block)) + .or(warp::get().and(status).or(header)) + .map(|reply| warp::reply::with_header(reply, "Server", "lighthouse-mock-builder-server")); -fn convert_err(e: E) -> MevError { - custom_err(format!("{e:?}")) + let (listening_socket, server) = warp::serve(routes) + .try_bind_ephemeral(SocketAddrV4::new(listen_addr, listen_port)) + .expect("mock builder server should start"); + Ok((listening_socket, server)) } -// This is a bit of a hack since the `Custom` variant was removed from `mev_rs::Error`. -fn custom_err(s: String) -> MevError { - MevError::Consensus(ethereum_consensus::state_transition::Error::Io( - std::io::Error::new(std::io::ErrorKind::Other, s), - )) +fn reject(msg: &'static str) -> Rejection { + warp::reject::custom(Custom(msg.to_string())) } diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 059c0275bba..f7f82781228 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,7 +25,7 @@ use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator}; pub use hook::Hook; -pub use mock_builder::{Context as MockBuilderContext, MockBuilder, MockBuilderServer, Operation}; +pub use mock_builder::{MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 1fbdab07ccd..6a8bd4e61ba 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -265,11 +265,7 @@ impl ApiTester { // Start the mock builder service prior to building the chain out. 
harness.runtime.task_executor.spawn( - async move { - if let Err(e) = mock_builder_server.await { - panic!("error in mock builder server: {e:?}"); - } - }, + async move { mock_builder_server.await }, "mock_builder_server", ); diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 8723c2afed9..a7355b68350 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -21,7 +21,7 @@ pub struct BuilderBid> { pub pubkey: PublicKeyBytes, #[serde(skip)] #[tree_hash(skip_hashing)] - _phantom_data: PhantomData, + pub _phantom_data: PhantomData, } impl> SignedRoot for BuilderBid {} diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index de7f26cc632..174014df8ec 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -21,3 +21,17 @@ pub struct ValidatorRegistrationData { } impl SignedRoot for ValidatorRegistrationData {} + +impl SignedValidatorRegistrationData { + pub fn verify_signature(&self, spec: &ChainSpec) -> bool { + self.message + .pubkey + .decompress() + .map(|pubkey| { + let domain = spec.get_builder_domain(); + let message = self.message.signing_root(domain); + self.signature.verify(&pubkey, message) + }) + .unwrap_or(false) + } +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 46f523c9905..18b71afc364 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -56,7 +56,7 @@ itertools = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } task_executor = { workspace = true } -reqwest = { workspace = true } +reqwest = { workspace = true, features = ["native-tls"] } url = { workspace = true } malloc_utils = { workspace = true } sysinfo = { workspace = true } diff --git a/validator_client/src/http_api/tests/keystores.rs b/validator_client/src/http_api/tests/keystores.rs index d60872e497b..f301af1c211 100644 --- a/validator_client/src/http_api/tests/keystores.rs +++ b/validator_client/src/http_api/tests/keystores.rs @@ -2146,7 +2146,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions().clone(); + let web3_vals = vals.validator_definitions(); // Import remotekeys. let import_res = tester @@ -2164,7 +2164,7 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions().clone(); + let remote_vals = vals.validator_definitions(); // Web3signer should not be overwritten since it is enabled. 
assert!(web3_vals == remote_vals); From 0dc95a1d3732dcc9e67f76db937a5bb862aa0db4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 3 Oct 2023 23:59:32 +0000 Subject: [PATCH 05/15] PeerManager: move the check for banned peers from connection_established (#4569) ## Issue Addressed https://github.com/sigp/lighthouse/issues/4543 ## Proposed Changes - Removes `NotBanned` from `BanResult`, implements `Display` and `std::error::Error` for `BanResult` and changes `ban_result` return type to `Option` which helps returning `BanResult` on `handle_established_inbound_connection` - moves the check from for banned peers from `on_connection_established` to `handle_established_inbound_connection` to start addressing #4543. - Removes `allow_block_list` as it's now redundant? Not sure about this one but if `PeerManager` keeps track of the banned peers, no need to send a `Swarm` event for `alow_block_list` to also keep that list right? ## Questions - #4543 refers: > More specifically, implement the connection limit behaviour inside the peer manager. @AgeManning do you mean copying `libp2p::connection_limits::Behaviour`'s code into `PeerManager`/ having it as an inner `NetworkBehaviour` of `PeerManager`/other? If it's the first two, I think it probably makes more sense to have it as it is as it's less code to maintain. > Also implement the banning of peers inside the behaviour, rather than passing messages back up to the swarm. I tried to achieve this, but we still need to pass the `PeerManagerEvent::Banned` swarm event as `DiscV5` handles it's node and ip management internally and I did not find a method to query if a peer is banned. Is there anything else we can do from here? https://github.com/sigp/lighthouse/blob/33976121601d9d31d26c1bb2e1f0676e9c4d24fc/beacon_node/lighthouse_network/src/discovery/mod.rs#L931-L940 Same as the question above, I did not find a way to check if `DiscV5` has the peer banned, so that we could check here and avoid sending `Swarm` events https://github.com/sigp/lighthouse/blob/33976121601d9d31d26c1bb2e1f0676e9c4d24fc/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs#L168-L178 Is there a chance we try to dial a peer that has been banned previously? Thanks! --- .../lighthouse_network/src/discovery/mod.rs | 2 - .../src/peer_manager/mod.rs | 4 +- .../src/peer_manager/network_behaviour.rs | 96 +++++++++------- .../src/peer_manager/peerdb.rs | 107 +++++++++--------- .../src/service/behaviour.rs | 2 - .../lighthouse_network/src/service/mod.rs | 9 -- 6 files changed, 107 insertions(+), 113 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 77fba905660..388790568f0 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -567,8 +567,6 @@ impl Discovery { if let Ok(node_id) = peer_id_to_node_id(peer_id) { // If we could convert this peer id, remove it from the DHT and ban it from discovery. self.discv5.ban_node(&node_id, None); - // Remove the node from the routing table. 
- self.discv5.remove_node(&node_id); } for ip_address in ip_addresses { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 3c9b29238a3..d8470fe6f09 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -415,7 +415,7 @@ impl PeerManager { /// Reports if a peer is banned or not. /// /// This is used to determine if we should accept incoming connections. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { + pub fn ban_status(&self, peer_id: &PeerId) -> Option { self.network_globals.peers.read().ban_status(peer_id) } @@ -803,7 +803,7 @@ impl PeerManager { ) -> bool { { let mut peerdb = self.network_globals.peers.write(); - if !matches!(peerdb.ban_status(peer_id), BanResult::NotBanned) { + if peerdb.ban_status(peer_id).is_some() { // don't connect if the peer is banned error!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index fedb876bb23..0617c8fa372 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -1,5 +1,6 @@ //! Implementation of [`NetworkBehaviour`] for the [`PeerManager`]. +use std::net::IpAddr; use std::task::{Context, Poll}; use futures::StreamExt; @@ -8,17 +9,17 @@ use libp2p::identity::PeerId; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dummy::ConnectionHandler; -use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; -use slog::{debug, error}; +use libp2p::swarm::{ConnectionDenied, ConnectionId, NetworkBehaviour, PollParameters, ToSwarm}; +use slog::{debug, error, trace}; use types::EthSpec; use crate::discovery::enr_ext::EnrExt; +use crate::peer_manager::peerdb::BanResult; use crate::rpc::GoodbyeReason; use crate::types::SyncState; use crate::{metrics, ClearDialError}; -use super::peerdb::BanResult; -use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource}; +use super::{ConnectingType, PeerManager, PeerManagerEvent}; impl NetworkBehaviour for PeerManager { type ConnectionHandler = ConnectionHandler; @@ -169,26 +170,64 @@ impl NetworkBehaviour for PeerManager { } } + fn handle_pending_inbound_connection( + &mut self, + _connection_id: ConnectionId, + _local_addr: &libp2p::Multiaddr, + remote_addr: &libp2p::Multiaddr, + ) -> Result<(), ConnectionDenied> { + // get the IP address to verify it's not banned. 
+ let ip = match remote_addr.iter().next() { + Some(libp2p::multiaddr::Protocol::Ip6(ip)) => IpAddr::V6(ip), + Some(libp2p::multiaddr::Protocol::Ip4(ip)) => IpAddr::V4(ip), + _ => { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: invalid multiaddr: {remote_addr}" + ))) + } + }; + + if self.network_globals.peers.read().is_ip_banned(&ip) { + return Err(ConnectionDenied::new(format!( + "Connection to peer rejected: peer {ip} is banned" + ))); + } + + Ok(()) + } + fn handle_established_inbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, + peer_id: PeerId, _local_addr: &libp2p::Multiaddr, - _remote_addr: &libp2p::Multiaddr, - ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. + remote_addr: &libp2p::Multiaddr, + ) -> Result, ConnectionDenied> { + trace!(self.log, "Inbound connection"; "peer_id" => %peer_id, "multiaddr" => %remote_addr); + // We already checked if the peer was banned on `handle_pending_inbound_connection`. + if let Some(BanResult::BadScore) = self.ban_status(&peer_id) { + return Err(ConnectionDenied::new( + "Connection to peer rejected: peer has a bad score", + )); + } Ok(ConnectionHandler) } fn handle_established_outbound_connection( &mut self, _connection_id: ConnectionId, - _peer: PeerId, - _addr: &libp2p::Multiaddr, + peer_id: PeerId, + addr: &libp2p::Multiaddr, _role_override: libp2p::core::Endpoint, ) -> Result, libp2p::swarm::ConnectionDenied> { - // TODO: we might want to check if we accept this peer or not in the future. - Ok(ConnectionHandler) + trace!(self.log, "Outbound connection"; "peer_id" => %peer_id, "multiaddr" => %addr); + match self.ban_status(&peer_id) { + Some(cause) => { + error!(self.log, "Connected a banned peer. Rejecting connection"; "peer_id" => %peer_id); + Err(ConnectionDenied::new(cause)) + } + None => Ok(ConnectionHandler), + } } } @@ -215,10 +254,7 @@ impl PeerManager { // increment prometheus metrics if self.metrics_enabled { - let remote_addr = match endpoint { - ConnectedPoint::Dialer { address, .. } => address, - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - }; + let remote_addr = endpoint.get_remote_address(); match remote_addr.iter().find(|proto| { matches!( proto, @@ -241,28 +277,6 @@ impl PeerManager { metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); } - // Check to make sure the peer is not supposed to be banned - match self.ban_status(&peer_id) { - // TODO: directly emit the ban event? - BanResult::BadScore => { - // This is a faulty state - error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id); - // Disconnect the peer. - self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager); - // Re-ban the peer to prevent repeated errors. - self.events.push(PeerManagerEvent::Banned(peer_id, vec![])); - return; - } - BanResult::BannedIp(ip_addr) => { - // A good peer has connected to us via a banned IP address. We ban the peer and - // prevent future connections. - debug!(self.log, "Peer connected via banned IP. Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr); - self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager); - return; - } - BanResult::NotBanned => {} - } - // Count dialing peers in the limit if the peer dialed us. let count_dialing = endpoint.is_listener(); // Check the connection limits @@ -326,11 +340,7 @@ impl PeerManager { // reference so that peer manager can track this peer. 
self.inject_disconnect(&peer_id); - let remote_addr = match endpoint { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address, .. } => address, - }; - + let remote_addr = endpoint.get_remote_address(); // Update the prometheus metrics if self.metrics_enabled { match remote_addr.iter().find(|proto| { diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 4a1efe8f2e9..7157a627213 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -3,10 +3,13 @@ use peer_info::{ConnectionDirection, PeerConnectionStatus, PeerInfo}; use rand::seq::SliceRandom; use score::{PeerAction, ReportSource, Score, ScoreState}; use slog::{crit, debug, error, trace, warn}; -use std::cmp::Ordering; -use std::collections::{HashMap, HashSet}; use std::net::IpAddr; use std::time::Instant; +use std::{cmp::Ordering, fmt::Display}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Formatter, +}; use sync_status::SyncStatus; use types::EthSpec; @@ -136,26 +139,18 @@ impl PeerDB { } } - /// Returns the current [`BanResult`] of the peer. This doesn't check the connection state, rather the + /// Returns the current [`BanResult`] of the peer if banned. This doesn't check the connection state, rather the /// underlying score of the peer. A peer may be banned but still in the connected state /// temporarily. /// /// This is used to determine if we should accept incoming connections or not. - pub fn ban_status(&self, peer_id: &PeerId) -> BanResult { - if let Some(peer) = self.peers.get(peer_id) { - match peer.score_state() { - ScoreState::Banned => BanResult::BadScore, - _ => { - if let Some(ip) = self.ip_is_banned(peer) { - BanResult::BannedIp(ip) - } else { - BanResult::NotBanned - } - } - } - } else { - BanResult::NotBanned - } + pub fn ban_status(&self, peer_id: &PeerId) -> Option { + self.peers + .get(peer_id) + .and_then(|peer| match peer.score_state() { + ScoreState::Banned => Some(BanResult::BadScore), + _ => self.ip_is_banned(peer).map(BanResult::BannedIp), + }) } /// Checks if the peer's known addresses are currently banned. @@ -1183,23 +1178,25 @@ pub enum BanOperation { } /// When checking if a peer is banned, it can be banned for multiple reasons. +#[derive(Copy, Clone, Debug)] pub enum BanResult { /// The peer's score is too low causing it to be banned. BadScore, /// The peer should be banned because it is connecting from a banned IP address. BannedIp(IpAddr), - /// The peer is not banned. - NotBanned, } -// Helper function for unit tests -#[cfg(test)] -impl BanResult { - pub fn is_banned(&self) -> bool { - !matches!(self, BanResult::NotBanned) +impl Display for BanResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BanResult::BadScore => write!(f, "Peer has a bad score"), + BanResult::BannedIp(addr) => write!(f, "Peer address: {} is banned", addr), + } } } +impl std::error::Error for BanResult {} + #[derive(Default)] pub struct BannedPeersCount { /// The number of banned peers in the database. 
@@ -1852,11 +1849,11 @@ mod tests { } //check that ip1 and ip2 are banned but ip3-5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); //ban also the last peer in peers let _ = pdb.report_peer( @@ -1868,11 +1865,11 @@ mod tests { pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]); //check that ip1-ip4 are banned but ip5 not - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[0] gets unbanned reset_score(&mut pdb, &peers[0]); @@ -1880,11 +1877,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //nothing changed - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); - assert!(pdb.ban_status(&p3).is_banned()); - assert!(pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); + assert!(pdb.ban_status(&p3).is_some()); + assert!(pdb.ban_status(&p4).is_some()); + assert!(pdb.ban_status(&p5).is_none()); //peers[1] gets unbanned reset_score(&mut pdb, &peers[1]); @@ -1892,11 +1889,11 @@ mod tests { let _ = pdb.shrink_to_fit(); //all ips are unbanned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); - assert!(!pdb.ban_status(&p3).is_banned()); - assert!(!pdb.ban_status(&p4).is_banned()); - assert!(!pdb.ban_status(&p5).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); + assert!(pdb.ban_status(&p3).is_none()); + assert!(pdb.ban_status(&p4).is_none()); + assert!(pdb.ban_status(&p5).is_none()); } #[test] @@ -1921,8 +1918,8 @@ mod tests { } // check ip is banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_none()); // unban a peer reset_score(&mut pdb, &peers[0]); @@ -1930,8 +1927,8 @@ mod tests { let _ = pdb.shrink_to_fit(); // check not banned anymore - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // unban all peers for p in &peers { @@ -1950,8 +1947,8 @@ mod tests { } // both IP's are now banned - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); // unban all peers for p in &peers { @@ -1967,16 +1964,16 @@ mod tests { } // nothing is banned - assert!(!pdb.ban_status(&p1).is_banned()); - assert!(!pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_none()); + assert!(pdb.ban_status(&p2).is_none()); // reban last peer let _ = pdb.report_peer(&peers[0], PeerAction::Fatal, ReportSource::PeerManager, ""); pdb.inject_disconnect(&peers[0]); 
//Ip's are banned again - assert!(pdb.ban_status(&p1).is_banned()); - assert!(pdb.ban_status(&p2).is_banned()); + assert!(pdb.ban_status(&p1).is_some()); + assert!(pdb.ban_status(&p2).is_some()); } #[test] diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index 6c52a07c14a..8dd750429c3 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -20,8 +20,6 @@ where AppReqId: ReqId, TSpec: EthSpec, { - /// Peers banned. - pub banned_peers: libp2p::allow_block_list::Behaviour, /// Keep track of active and pending connections to enforce hard limits. pub connection_limits: libp2p::connection_limits::Behaviour, /// The routing pub-sub mechanism for eth2. diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e4e11f29c55..6fff6278c1b 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -337,11 +337,8 @@ impl Network { libp2p::connection_limits::Behaviour::new(limits) }; - let banned_peers = libp2p::allow_block_list::Behaviour::default(); - let behaviour = { Behaviour { - banned_peers, gossipsub, eth2_rpc, discovery, @@ -1402,15 +1399,10 @@ impl Network { Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.swarm.behaviour_mut().banned_peers.block_peer(peer_id); self.discovery_mut().ban_peer(&peer_id, associated_ips); None } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.swarm - .behaviour_mut() - .banned_peers - .unblock_peer(peer_id); self.discovery_mut().unban_peer(&peer_id, associated_ips); None } @@ -1459,7 +1451,6 @@ impl Network { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { // Handle sub-behaviour events. - BehaviourEvent::BannedPeers(void) => void::unreachable(void), BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), // Inform the peer manager about discovered peers. From f11884ccdb1846a68fa0e85f4107eaec1b2bf505 Mon Sep 17 00:00:00 2001 From: Divma Date: Tue, 3 Oct 2023 23:59:34 +0000 Subject: [PATCH 06/15] enforce non zero enr ports (#4776) ## Issue Addressed Right now lighthouse accepts zero as enr ports. Since enr ports should be reachable, zero ports should be rejected here ## Proposed Changes - update the config to use `NonZerou16` as an ENR port for all enr-related fields. - the enr builder from config now sets the enr to the listening port only if the enr port is not already set (prev behaviour) and the listening port is not zero (new behaviour) - reject zero listening ports when used with `enr-match`. - boot node now rejects listening port as zero, since those are advertised. - generate-bootnode-enr also rejected zero listening ports for the same reason. - update local network scripts ## Additional Info Unrelated, but why do we overwrite `enr-x-port` values with listening ports if `enr-match` is present? we prob should only do this for enr values that are not already set. 
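As a rough standalone illustration (not part of this diff) of why `NonZeroU16` gives this enforcement for free, relying only on the standard library's `FromStr`/`TryFrom` impls:

```rust
use std::num::NonZeroU16;

// A zero port cannot even be constructed, so rejection happens when the CLI
// value is parsed instead of being re-checked (or forgotten) later when the
// ENR is built.
fn parse_enr_port(s: &str) -> Result<NonZeroU16, String> {
    s.parse::<NonZeroU16>()
        .map_err(|_| format!("Invalid ENR port: {s}"))
}

fn main() {
    assert!(parse_enr_port("9000").is_ok());
    assert!(parse_enr_port("0").is_err()); // zero is not representable
    // Already-parsed listening ports behave the same way via TryFrom:
    assert!(NonZeroU16::try_from(9000u16).is_ok());
    assert!(NonZeroU16::try_from(0u16).is_err());
}
```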
--- beacon_node/lighthouse_network/src/config.rs | 13 ++-- .../lighthouse_network/src/discovery/enr.rs | 50 +++++++----- beacon_node/src/config.rs | 40 ++++++---- boot_node/src/config.rs | 26 ++++--- lcli/src/generate_bootnode_enr.rs | 6 +- lighthouse/tests/beacon_node.rs | 77 +++++++++++++++---- testing/simulator/src/local_network.rs | 14 ++-- 7 files changed, 155 insertions(+), 71 deletions(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index c5077448823..42260463961 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -11,6 +11,7 @@ use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use std::net::{Ipv4Addr, Ipv6Addr}; +use std::num::NonZeroU16; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -64,22 +65,22 @@ pub struct Config { pub enr_address: (Option, Option), /// The udp ipv4 port to broadcast to peers in order to reach back for discovery. - pub enr_udp4_port: Option, + pub enr_udp4_port: Option, /// The quic ipv4 port to broadcast to peers in order to reach back for libp2p services. - pub enr_quic4_port: Option, + pub enr_quic4_port: Option, /// The tcp ipv4 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp4_port: Option, + pub enr_tcp4_port: Option, /// The udp ipv6 port to broadcast to peers in order to reach back for discovery. - pub enr_udp6_port: Option, + pub enr_udp6_port: Option, /// The tcp ipv6 port to broadcast to peers in order to reach back for libp2p services. - pub enr_tcp6_port: Option, + pub enr_tcp6_port: Option, /// The quic ipv6 port to broadcast to peers in order to reach back for libp2p services. - pub enr_quic6_port: Option, + pub enr_quic6_port: Option, /// Target number of connected peers. pub target_peers: usize, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 3f46285a807..8eacabb4d0d 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -158,11 +158,11 @@ pub fn create_enr_builder_from_config( } if let Some(udp4_port) = config.enr_udp4_port { - builder.udp4(udp4_port); + builder.udp4(udp4_port.get()); } if let Some(udp6_port) = config.enr_udp6_port { - builder.udp6(udp6_port); + builder.udp6(udp6_port.get()); } if enable_libp2p { @@ -171,35 +171,45 @@ pub fn create_enr_builder_from_config( // the related fields should only be added when both QUIC and libp2p are enabled if !config.disable_quic_support { // If we are listening on ipv4, add the quic ipv4 port. - if let Some(quic4_port) = config - .enr_quic4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.quic_port)) - { - builder.add_value(QUIC_ENR_KEY, &quic4_port); + if let Some(quic4_port) = config.enr_quic4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC_ENR_KEY, &quic4_port.get()); } // If we are listening on ipv6, add the quic ipv6 port. 
- if let Some(quic6_port) = config - .enr_quic6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.quic_port)) - { - builder.add_value(QUIC6_ENR_KEY, &quic6_port); + if let Some(quic6_port) = config.enr_quic6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.quic_port.try_into().ok()) + }) { + builder.add_value(QUIC6_ENR_KEY, &quic6_port.get()); } } // If the ENR port is not set, and we are listening over that ip version, use the listening port instead. - let tcp4_port = config - .enr_tcp4_port - .or_else(|| config.listen_addrs().v4().map(|v4_addr| v4_addr.tcp_port)); + let tcp4_port = config.enr_tcp4_port.or_else(|| { + config + .listen_addrs() + .v4() + .and_then(|v4_addr| v4_addr.tcp_port.try_into().ok()) + }); if let Some(tcp4_port) = tcp4_port { - builder.tcp4(tcp4_port); + builder.tcp4(tcp4_port.get()); } - let tcp6_port = config - .enr_tcp6_port - .or_else(|| config.listen_addrs().v6().map(|v6_addr| v6_addr.tcp_port)); + let tcp6_port = config.enr_tcp6_port.or_else(|| { + config + .listen_addrs() + .v6() + .and_then(|v6_addr| v6_addr.tcp_port.try_into().ok()) + }); if let Some(tcp6_port) = tcp6_port { - builder.tcp6(tcp6_port); + builder.tcp6(tcp6_port.get()); } } builder diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4ab92a7fd46..48b4a8f0d85 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -21,6 +21,7 @@ use std::fmt::Debug; use std::fs; use std::net::Ipv6Addr; use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs}; +use std::num::NonZeroU16; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; @@ -1178,23 +1179,23 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { config.enr_udp4_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, ); } if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic-port") { config.enr_quic4_port = Some( enr_quic_port_str - .parse::() - .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { config.enr_tcp4_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } @@ -1202,23 +1203,23 @@ pub fn set_network_config( if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp6-port") { config.enr_udp6_port = Some( enr_udp_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR discovery port: {}", enr_udp_port_str))?, ); } if let Some(enr_quic_port_str) = cli_args.value_of("enr-quic6-port") { config.enr_quic6_port = Some( enr_quic_port_str - .parse::() - .map_err(|_| format!("Invalid quic port: {}", enr_quic_port_str))?, + .parse::() + .map_err(|_| format!("Invalid ENR quic port: {}", enr_quic_port_str))?, ); } if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp6-port") { config.enr_tcp6_port = Some( enr_tcp_port_str - .parse::() + .parse::() .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, ); } @@ -1226,25 +1227,38 @@ pub fn set_network_config( if cli_args.is_present("enr-match") { // Match the IP and UDP port in the ENR. 
- // Set the ENR address to localhost if the address is unspecified. if let Some(ipv4_addr) = config.listen_addrs().v4().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv4_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv4_enr_addr = if ipv4_addr.addr == Ipv4Addr::UNSPECIFIED { Ipv4Addr::LOCALHOST } else { ipv4_addr.addr }; config.enr_address.0 = Some(ipv4_enr_addr); - config.enr_udp4_port = Some(ipv4_addr.disc_port); + config.enr_udp4_port = Some(disc_port); } if let Some(ipv6_addr) = config.listen_addrs().v6().cloned() { + // ensure the port is valid to be advertised + let disc_port = ipv6_addr + .disc_port + .try_into() + .map_err(|_| "enr-match can only be used with non-zero listening ports")?; + + // Set the ENR address to localhost if the address is unspecified. let ipv6_enr_addr = if ipv6_addr.addr == Ipv6Addr::UNSPECIFIED { Ipv6Addr::LOCALHOST } else { ipv6_addr.addr }; config.enr_address.1 = Some(ipv6_enr_addr); - config.enr_udp6_port = Some(ipv6_addr.disc_port); + config.enr_udp6_port = Some(disc_port); } } diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 5d7853bd247..d435efc6f5a 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -60,19 +60,25 @@ impl BootNodeConfig { // Set the Enr Discovery ports to the listening ports if not present. if let Some(listening_addr_v4) = network_config.listen_addrs().v4() { - network_config.enr_udp4_port = Some( - network_config - .enr_udp4_port - .unwrap_or(listening_addr_v4.disc_port), - ) + if network_config.enr_udp4_port.is_none() { + network_config.enr_udp4_port = + Some(network_config.enr_udp4_port.unwrap_or( + listening_addr_v4.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; if let Some(listening_addr_v6) = network_config.listen_addrs().v6() { - network_config.enr_udp6_port = Some( - network_config - .enr_udp6_port - .unwrap_or(listening_addr_v6.disc_port), - ) + if network_config.enr_udp6_port.is_none() { + network_config.enr_udp6_port = + Some(network_config.enr_udp6_port.unwrap_or( + listening_addr_v6.disc_port.try_into().map_err(|_| { + "boot node enr-udp-port not set and listening port is zero" + })?, + )) + } }; // By default this is enabled. If it is not set, revert to false. 
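The fallback rule implemented by the enr builder and boot node changes above can be summarised in a small self-contained sketch (hypothetical helper name and simplified signature, for illustration only):

```rust
use std::num::NonZeroU16;

// Prefer an explicitly configured ENR port; otherwise fall back to the
// listening port, but only when that port is non-zero. An ephemeral (zero)
// listening port is never advertised in the ENR.
fn effective_enr_port(configured: Option<NonZeroU16>, listening: u16) -> Option<NonZeroU16> {
    configured.or_else(|| listening.try_into().ok())
}

fn main() {
    let port = NonZeroU16::new(9000);
    assert_eq!(effective_enr_port(port, 0), port); // explicit value wins
    assert_eq!(effective_enr_port(None, 9000), port); // fall back to the listener
    assert_eq!(effective_enr_port(None, 0), None); // zero is never advertised
}
```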
diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 0584cd65496..1d41bedc88f 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -4,16 +4,16 @@ use lighthouse_network::{ libp2p::identity::secp256k1, NetworkConfig, NETWORK_KEY_FILENAME, }; -use std::fs::File; use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; +use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; - let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; - let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4140a3f6b42..4ed2e9c2c8c 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1274,7 +1274,12 @@ fn enr_udp_port_flag() { CommandLineTest::new() .flag("enr-udp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic_port_flag() { @@ -1282,7 +1287,12 @@ fn enr_quic_port_flag() { CommandLineTest::new() .flag("enr-quic-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp_port_flag() { @@ -1290,7 +1300,12 @@ fn enr_tcp_port_flag() { CommandLineTest::new() .flag("enr-tcp-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp4_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp4_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_udp6_port_flag() { @@ -1298,7 +1313,12 @@ fn enr_udp6_port_flag() { CommandLineTest::new() .flag("enr-udp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_udp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_quic6_port_flag() { @@ -1306,7 +1326,12 @@ fn enr_quic6_port_flag() { CommandLineTest::new() .flag("enr-quic6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_quic6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_quic6_port.map(|port| port.get()), + Some(port) + ) + }); } #[test] fn enr_tcp6_port_flag() { @@ -1314,7 +1339,12 @@ fn enr_tcp6_port_flag() { CommandLineTest::new() .flag("enr-tcp6-port", Some(port.to_string().as_str())) .run_with_zero_port() - .with_config(|config| assert_eq!(config.network.enr_tcp6_port, Some(port))); + .with_config(|config| { + assert_eq!( + config.network.enr_tcp6_port.map(|port| port.get()), + 
Some(port) + ) + }); } #[test] fn enr_match_flag_over_ipv4() { @@ -1340,7 +1370,10 @@ fn enr_match_flag_over_ipv4() { Some((addr, udp4_port, tcp4_port)) ); assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] @@ -1368,7 +1401,10 @@ fn enr_match_flag_over_ipv6() { Some((addr, udp6_port, tcp6_port)) ); assert_eq!(config.network.enr_address, (None, Some(addr))); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); }); } #[test] @@ -1416,8 +1452,14 @@ fn enr_match_flag_over_ipv4_and_ipv6() { config.network.enr_address, (Some(ipv4_addr), Some(ipv6_addr)) ); - assert_eq!(config.network.enr_udp6_port, Some(udp6_port)); - assert_eq!(config.network.enr_udp4_port, Some(udp4_port)); + assert_eq!( + config.network.enr_udp6_port.map(|port| port.get()), + Some(udp6_port) + ); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(udp4_port) + ); }); } #[test] @@ -1430,7 +1472,10 @@ fn enr_address_flag_with_ipv4() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] @@ -1443,7 +1488,10 @@ fn enr_address_flag_with_ipv6() { .run_with_zero_port() .with_config(|config| { assert_eq!(config.network.enr_address, (Some(addr), None)); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] @@ -1460,7 +1508,10 @@ fn enr_address_dns_flag() { config.network.enr_address.0 == Some(addr) || config.network.enr_address.1 == Some(ipv6addr) ); - assert_eq!(config.network.enr_udp4_port, Some(port)); + assert_eq!( + config.network.enr_udp4_port.map(|port| port.get()), + Some(port) + ); }); } #[test] diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 69fa8ded023..1024c46e491 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -66,8 +66,8 @@ impl LocalNetwork { BOOTNODE_PORT, QUIC_PORT, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); + beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT.try_into().expect("non zero")); beacon_config.network.discv5_config.table_filter = |_| true; let execution_node = if let Some(el_config) = &mut beacon_config.execution_layer { @@ -152,14 +152,16 @@ impl LocalNetwork { .expect("bootnode must have a network"), ); let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; + let libp2p_tcp_port = BOOTNODE_PORT + count; + let discv5_port = BOOTNODE_PORT + count; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, - BOOTNODE_PORT + count, - BOOTNODE_PORT + count, + libp2p_tcp_port, + discv5_port, QUIC_PORT + count, ); - beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); - beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); + beacon_config.network.enr_udp4_port = Some(discv5_port.try_into().unwrap()); + beacon_config.network.enr_tcp4_port = 
Some(libp2p_tcp_port.try_into().unwrap()); beacon_config.network.discv5_config.table_filter = |_| true; beacon_config.network.proposer_only = is_proposer; } From f7daf82430a2a4981e6dc2aac52f2d16d2b64e41 Mon Sep 17 00:00:00 2001 From: Lucas Saldanha Date: Tue, 3 Oct 2023 23:59:34 +0000 Subject: [PATCH 07/15] Removed old Teku mainnet bootnode ENRs (#4786) ## Issue Addressed N/A ## Proposed Changes Removing the two Teku mainnet bootnodes that are being sunset. ## Additional Info We are leaving only these two bootnodes: https://github.com/eth-clients/eth2-networks/blob/master/shared/mainnet/bootstrap_nodes.txt#L10-L11 --- .../built_in_network_configs/mainnet/boot_enr.yaml | 2 -- lighthouse/tests/beacon_node.rs | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml index 7442f6576e6..3c6e1bad8a5 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/boot_enr.yaml @@ -9,8 +9,6 @@ - enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg - enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg # Teku team (Consensys) -- enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA -- enr:-KG4QL-eqFoHy0cI31THvtZjpYUu_Jdw_MO7skQRJxY1g5HTN1A0epPCU6vi0gLGUgrzpU-ygeMSS8ewVxDpKfYmxMMGhGV0aDKQtTA_KgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaED8GJ2vzUqgL6-KD1xalo1CsmY4X1HaDnyl6Y_WayCo9GDdGNwgiMog3VkcIIjKA - enr:-KG4QMOEswP62yzDjSwWS4YEjtTZ5PO6r65CPqYBkgTTkrpaedQ8uEUo1uMALtJIvb2w_WWEVmg5yt1UAuK1ftxUU7QDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaEDfol8oLr6XJ7FsdAYE7lpJhKMls4G_v6qQOGKJUWGb_uDdGNwgiMog3VkcIIjKA - enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA # Prysm team (Prysmatic Labs) diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 4ed2e9c2c8c..d7e751b5e85 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -1199,7 +1199,7 @@ fn default_backfill_rate_limiting_flag() { } #[test] fn default_boot_nodes() { - let number_of_boot_nodes = 17; + let number_of_boot_nodes = 15; CommandLineTest::new() .run_with_zero_port() From 5bab9b866ee8e959e511466cda0647319b94a1d9 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 3 Oct 2023 23:59:35 +0000 Subject: [PATCH 08/15] Don't downscore peers on duplicate blocks (#4791) ## Issue Addressed N/A ## Proposed Changes We were currently downscoring a peer for sending us a block that we already have in fork choice. This is unnecessary as we get duplicates in lighthouse only when 1. We published the block, so the block is already in fork choice 2. 
We imported the same block over rpc In both scenarios, the peer who sent us the block over gossip is not at fault. This isn't exploitable as valid duplicates will get dropped by the gossipsub duplicate filter --- .../src/network_beacon_processor/gossip_methods.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index ac7479db011..323c6120755 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -763,9 +763,17 @@ impl NetworkBeaconProcessor { self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); return None; } + Err(BlockError::BlockIsAlreadyKnown) => { + debug!( + self.log, + "Gossip block is already known"; + "block_root" => %block_root, + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + return None; + } Err(e @ BlockError::FutureSlot { .. }) | Err(e @ BlockError::WouldRevertFinalizedSlot { .. }) - | Err(e @ BlockError::BlockIsAlreadyKnown) | Err(e @ BlockError::NotFinalizedDescendant { .. }) => { debug!(self.log, "Could not verify block for gossip. Ignoring the block"; "error" => %e); From 6ec649a4e2cab61a5b7a48d6984f0e1adfbace05 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 3 Oct 2023 23:59:37 +0000 Subject: [PATCH 09/15] Optimise head block root API (#4799) ## Issue Addressed We've had a report of sync committee performance suffering with the beacon processor HTTP API prioritisations. ## Proposed Changes Increase the priority of `/eth/v1/beacon/blocks/head/root` requests, which are used by the validator client to form sync committee messages, here: https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/validator_client/src/sync_committee_service.rs#L181-L188 Additionally, avoid loading the blinded block in all but the `block_id=block_root` case. I'm not sure why we were doing this previously, I suspect it was just an oversight during the implementation of the `finalized` status on API requests. ## Additional Info I think this change should have minimal negative impact as: - The block root endpoint is quick to compute (a few ms max). - Only the priority of `head` requests is increased. Analytical processes that are making lots of block root requests for past slots are unable to DoS the beacon processor, as their requests will still be processed after attestations. --- beacon_node/http_api/src/lib.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index cde05e68215..20f0a7fb31a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1629,13 +1629,19 @@ pub fn serve( |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - block.canonical_root(), - )) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee messages. 
+ let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; + Ok( + api_types::GenericResponse::from(api_types::RootData::from(block_root)) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) }) }, ); From ba8bcf4bd344065da021adb8b7f2115c9052dd19 Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Wed, 4 Oct 2023 00:43:28 +0000 Subject: [PATCH 10/15] Remove deficit gossipsub scoring during topic transition (#4486) ## Issue Addressed This PR closes https://github.com/sigp/lighthouse/issues/3237 ## Proposed Changes Remove topic weight of old topics when the fork happens. ## Additional Info - Divided `NetworkService::start()` into `NetworkService::build()` and `NetworkService::start()` for ease of testing. --- .../lighthouse_network/src/service/mod.rs | 33 +++++ beacon_node/network/src/service.rs | 38 ++++- beacon_node/network/src/service/tests.rs | 140 +++++++++++++++++- 3 files changed, 204 insertions(+), 7 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 6fff6278c1b..e70cda69756 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -26,6 +26,7 @@ use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettin use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::{ self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError, + TopicScoreParams, }; use libp2p::identify; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; @@ -618,6 +619,38 @@ impl Network { } } + /// Remove topic weight from all topics that don't have the given fork digest. + pub fn remove_topic_weight_except(&mut self, except: [u8; 4]) { + let new_param = TopicScoreParams { + topic_weight: 0.0, + ..Default::default() + }; + let subscriptions = self.network_globals.gossipsub_subscriptions.read().clone(); + for topic in subscriptions + .iter() + .filter(|topic| topic.fork_digest != except) + { + let libp2p_topic: Topic = topic.clone().into(); + match self + .gossipsub_mut() + .set_topic_params(libp2p_topic, new_param.clone()) + { + Ok(_) => debug!(self.log, "Removed topic weight"; "topic" => %topic), + Err(e) => { + warn!(self.log, "Failed to remove topic weight"; "topic" => %topic, "error" => e) + } + } + } + } + + /// Returns the scoring parameters for a topic if set. + pub fn get_topic_params(&self, topic: GossipTopic) -> Option<&TopicScoreParams> { + self.swarm + .behaviour() + .gossipsub + .get_topic_params(&topic.into()) + } + /// Subscribes to a gossipsub topic. /// /// Returns `true` if the subscription was successful and `false` otherwise. 
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 174a0ec14c6..aa92e0afdab 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -215,15 +215,18 @@ pub struct NetworkService { } impl NetworkService { - #[allow(clippy::type_complexity)] - pub async fn start( + async fn build( beacon_chain: Arc>, config: &NetworkConfig, executor: task_executor::TaskExecutor, gossipsub_registry: Option<&'_ mut Registry>, beacon_processor_send: BeaconProcessorSend, beacon_processor_reprocess_tx: mpsc::Sender, - ) -> error::Result<(Arc>, NetworkSenders)> { + ) -> error::Result<( + NetworkService, + Arc>, + NetworkSenders, + )> { let network_log = executor.log().clone(); // build the channels for external comms let (network_senders, network_recievers) = NetworkSenders::new(); @@ -369,6 +372,28 @@ impl NetworkService { enable_light_client_server: config.enable_light_client_server, }; + Ok((network_service, network_globals, network_senders)) + } + + #[allow(clippy::type_complexity)] + pub async fn start( + beacon_chain: Arc>, + config: &NetworkConfig, + executor: task_executor::TaskExecutor, + gossipsub_registry: Option<&'_ mut Registry>, + beacon_processor_send: BeaconProcessorSend, + beacon_processor_reprocess_tx: mpsc::Sender, + ) -> error::Result<(Arc>, NetworkSenders)> { + let (network_service, network_globals, network_senders) = Self::build( + beacon_chain, + config, + executor.clone(), + gossipsub_registry, + beacon_processor_send, + beacon_processor_reprocess_tx, + ) + .await?; + network_service.spawn_service(executor); Ok((network_globals, network_senders)) @@ -882,9 +907,10 @@ impl NetworkService { fn update_next_fork(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; - if let Some(new_fork_name) = fork_context.from_context_bytes(new_enr_fork_id.fork_digest) { + if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { info!( self.log, "Transitioned to new fork"; @@ -907,6 +933,10 @@ impl NetworkService { Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!(self.log, "Network will unsubscribe from old fork gossip topics in a few epochs"; "remaining_epochs" => UNSUBSCRIBE_DELAY_EPOCHS); + + // Remove topic weight from old fork topics to prevent peers that left on the mesh on + // old topics from being penalized for not sending us messages. 
+ self.libp2p.remove_topic_weight_except(new_fork_digest); } else { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index 23bcf456dee..35a7f1eab73 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -4,14 +4,26 @@ mod tests { use crate::persisted_dht::load_dht; use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; - use beacon_processor::BeaconProcessorChannels; - use lighthouse_network::Enr; + use beacon_chain::BeaconChainTypes; + use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; + use futures::StreamExt; + use lighthouse_network::types::{GossipEncoding, GossipKind}; + use lighthouse_network::{Enr, GossipTopic}; use slog::{o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; use tokio::runtime::Runtime; - use types::MinimalEthSpec; + use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + + impl NetworkService { + fn get_topic_params( + &self, + topic: GossipTopic, + ) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) + } + } fn get_logger(actual_log: bool) -> Logger { if actual_log { @@ -102,4 +114,126 @@ mod tests { "should have persisted the second ENR to store" ); } + + // Test removing topic weight on old topics when a fork happens. + #[test] + fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. + let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = exit_future::signal(); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = task_executor::TaskExecutor::new( + Arc::downgrade(&runtime), + exit, + get_logger(false), + shutdown_tx, + ); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + &config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); + + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.attestation_service.next().await { + network_service.on_attestation_service_msg(msg); + } + } + }); + + // Make sure the service is subscribed to the topics. 
+ let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_subnets_for_epoch::( + network_globals.local_enr().node_id().raw().into(), + beacon_chain.epoch().unwrap(), + &spec, + ) + .unwrap() + .0 + .collect::>(); + assert_eq!(2, subnets.len()); + + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); + } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. + let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); + } } From 7d537214b7a337cc18440cc1827502219e988bee Mon Sep 17 00:00:00 2001 From: duguorong009 Date: Wed, 4 Oct 2023 00:43:29 +0000 Subject: [PATCH 11/15] fix(validator_client): return http 404 rather than 405 in http api (#4758) ## Issue Addressed - Close #4596 ## Proposed Changes - Add `Filter::recover` to handle rejections specifically as 404 NOT FOUND Please list or describe the changes introduced by this PR. ## Additional Info Similar to PR #3836 --- validator_client/src/http_api/mod.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index f654833cbb4..669edc67186 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -1176,7 +1176,8 @@ pub fn serve( .or(get_fee_recipient) .or(get_gas_limit) .or(get_std_keystores) - .or(get_std_remotekeys), + .or(get_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), ) .or(warp::post().and( post_validators @@ -1187,15 +1188,18 @@ pub fn serve( .or(post_fee_recipient) .or(post_gas_limit) .or(post_std_keystores) - .or(post_std_remotekeys), + .or(post_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )) - .or(warp::patch().and(patch_validators)) + .or(warp::patch() + .and(patch_validators.recover(warp_utils::reject::handle_rejection))) .or(warp::delete().and( delete_lighthouse_keystores .or(delete_fee_recipient) .or(delete_gas_limit) .or(delete_std_keystores) - .or(delete_std_remotekeys), + .or(delete_std_remotekeys) + .recover(warp_utils::reject::handle_rejection), )), ) // The auth route and logs are the only routes that are allowed to be accessed without the API token. 
From 4b619c63d71b35487c80f9a6e938cdd816f94d13 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Thu, 5 Oct 2023 02:14:55 +0000 Subject: [PATCH 12/15] Exit aggregation step early if no validator is aggregator (#4774) ## Issue Addressed Closes https://github.com/sigp/lighthouse/issues/4712 ## Proposed Changes Exit aggregation step early if no validator is aggregator. This avoids an unnecessary request to the beacon node and more importantly fixes noisy errors if Lighthouse VC is used with other clients such as Lodestar and Prysm. ## Additional Info Related issue https://github.com/ChainSafe/lodestar/issues/5553 --- validator_client/src/attestation_service.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index f0a9258c747..1b7b391a0c8 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -491,6 +491,14 @@ impl AttestationService { ) -> Result<(), String> { let log = self.context.log(); + if !validator_duties + .iter() + .any(|duty_and_proof| duty_and_proof.selection_proof.is_some()) + { + // Exit early if no validator is aggregator + return Ok(()); + } + let aggregated_attestation = &self .beacon_nodes .first_success( From 72563ffb417b6b7c0ea00e824f4f8505b08eaf84 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 5 Oct 2023 16:38:06 +1100 Subject: [PATCH 13/15] Fix CI tests --- .github/workflows/test-suite.yml | 3 ++- Makefile | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 6d5e0ede5ff..1d80feaddf4 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -304,7 +304,8 @@ jobs: run: | make - name: Install lcli - if: env.SELF_HOSTED_RUNNERS == 'false' +# TODO(jimmy): re-enable this once we merge deneb into unstable +# if: env.SELF_HOSTED_RUNNERS == 'false' run: make install-lcli - name: Run the doppelganger protection failure test script run: | diff --git a/Makefile b/Makefile index 12a939bde13..05aafa8b37c 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,7 @@ test-release: # Runs the full workspace tests in **release**, without downloading any additional # test vectors, using nextest. nextest-release: - cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher + cargo nextest run --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher --exclude network # Runs the full workspace tests in **debug**, without downloading any additional test # vectors. @@ -121,7 +121,7 @@ test-debug: # Runs the full workspace tests in **debug**, without downloading any additional test # vectors, using nextest. nextest-debug: - cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain + cargo nextest run --workspace --exclude ef_tests --exclude beacon_chain --exclude network # Runs cargo-fmt (linter). cargo-fmt: From b82f7843ffb173826e42ecd46c394b101c8133b4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Oct 2023 06:03:24 +0000 Subject: [PATCH 14/15] Use peeking_take_while in BlockReplayer (#4803) ## Issue Addressed While reviewing #4801 I noticed that our use of `take_while` in the block replayer means that if a state root iterator _with gaps_ is provided, some additonal state roots will be dropped unnecessarily. 
In practice the impact is small, because once there's _one_ state root miss, the whole tree hash cache needs to be built anyway, and subsequent misses are less costly. However this was still a little inefficient, so I figured it's better to fix it. ## Proposed Changes Use [`peeking_take_while`](https://docs.rs/itertools/latest/itertools/trait.Itertools.html#method.peeking_take_while) to avoid consuming the next element when checking whether it satisfies the slot predicate. ## Additional Info There's a gist here that shows the basic dynamics in isolation: https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=40b623cc0febf9ed51705d476ab140c5. Changing the `peeking_take_while` to a `take_while` causes the assert to fail. Similarly I've added a new test `block_replayer_peeking_state_roots` which fails if the same change is applied inside `get_state_root`. --- .../state_processing/src/block_replayer.rs | 10 ++-- .../src/per_block_processing/tests.rs | 50 ++++++++++++++++++- 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ed5e6429412..f502d7f692c 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -3,6 +3,8 @@ use crate::{ BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, VerifyBlockRoot, }; +use itertools::Itertools; +use std::iter::Peekable; use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; @@ -25,7 +27,7 @@ pub struct BlockReplayer< 'a, Spec: EthSpec, Error = BlockReplayError, - StateRootIter = StateRootIterDefault, + StateRootIter: Iterator> = StateRootIterDefault, > { state: BeaconState, spec: &'a ChainSpec, @@ -36,7 +38,7 @@ pub struct BlockReplayer< post_block_hook: Option>, pre_slot_hook: Option>, post_slot_hook: Option>, - state_root_iter: Option, + pub(crate) state_root_iter: Option>, state_root_miss: bool, _phantom: PhantomData, } @@ -138,7 +140,7 @@ where /// `self.state.slot` to the `target_slot` supplied to `apply_blocks` (inclusive of both /// endpoints). pub fn state_root_iter(mut self, iter: StateRootIter) -> Self { - self.state_root_iter = Some(iter); + self.state_root_iter = Some(iter.peekable()); self } @@ -192,7 +194,7 @@ where // If a state root iterator is configured, use it to find the root. 
if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter - .take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) + .peeking_take_while(|res| res.as_ref().map_or(true, |(_, s)| *s <= slot)) .find(|res| res.as_ref().map_or(true, |(_, s)| *s == slot)) .transpose()?; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index df5aa9f7a60..8f7fb43b1da 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -5,7 +5,7 @@ use crate::per_block_processing::errors::{ DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; -use crate::{per_block_processing, StateProcessingStrategy}; +use crate::{per_block_processing, BlockReplayError, BlockReplayer, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -1035,3 +1035,51 @@ async fn fork_spanning_exit() { ) .expect_err("phase0 exit does not verify against bellatrix state"); } + +/// Check that the block replayer does not consume state roots unnecessarily. +#[tokio::test] +async fn block_replayer_peeking_state_roots() { + let harness = get_harness::(EPOCH_OFFSET, VALIDATOR_COUNT).await; + + let target_state = harness.get_current_state(); + let target_block_root = harness.head_block_root(); + let target_block = harness + .chain + .get_blinded_block(&target_block_root) + .unwrap() + .unwrap(); + + let parent_block_root = target_block.parent_root(); + let parent_block = harness + .chain + .get_blinded_block(&parent_block_root) + .unwrap() + .unwrap(); + let parent_state = harness + .chain + .get_state(&parent_block.state_root(), Some(parent_block.slot())) + .unwrap() + .unwrap(); + + // Omit the state root for `target_state` but provide a dummy state root at the *next* slot. + // If the block replayer is peeking at the state roots rather than consuming them, then the + // dummy state should still be there after block replay completes. 
+ let dummy_state_root = Hash256::repeat_byte(0xff); + let dummy_slot = target_state.slot() + 1; + let state_root_iter = vec![Ok::<_, BlockReplayError>((dummy_state_root, dummy_slot))]; + let block_replayer = BlockReplayer::new(parent_state, &harness.chain.spec) + .state_root_iter(state_root_iter.into_iter()) + .no_signature_verification() + .apply_blocks(vec![target_block], None) + .unwrap(); + + assert_eq!( + block_replayer + .state_root_iter + .unwrap() + .next() + .unwrap() + .unwrap(), + (dummy_state_root, dummy_slot) + ); +} From a96963fd5f458857fbb84f3ef59a99d66a685d36 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 6 Oct 2023 00:24:09 +1100 Subject: [PATCH 15/15] Re-commit corrupted key files --- .../web3signer_tests/tls/lighthouse/key.p12 | Bin 7852 -> 4371 bytes .../web3signer_tests/tls/web3signer/key.p12 | Bin 7727 -> 4371 bytes 2 files changed, 0 insertions(+), 0 deletions(-) diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index aaf43e56f479d3eb78c026625218bd4535948010..73468fa084b6f5f1b036afd643967e361d004fc4 100644 GIT binary patch literal 4371 zcmai&Ra6uV*M*rOhwcVZq-%hoTLh$)?hr`<0a3b!?nVLWp&4S3hM^of29WMhkPa!S z&u{(zdf&_M;;ggI-WU7w+3SFzWF$ZUCKyVJ!U1tdyok6Y0^kDjp`?viP*TKSxf%?` zDg4*M$%o>k{*@Ae0F1vY>R$%{HvH$nCIYL0x&A!~!HigVr3A+yIKx5^5Qv8XCd9$} zzce5S8w1RQ1G0*E0kFly0&ruIGO#mbW;JP_AiQPX&$c~M4u;~eEaD#B)6&$NAyv~x zekn?*qez(oij&tt*X|$b+tSW!+`Ty{JoMnIlLu0XTzaf>g^V0R%GlKWMyLkPyLNFz zc>!BzGKl6DZ#hj$CziZDgS$IgqA7*jqkC_~sb?fXpgVmrZoEcXJp0m_eO~obN+99T z9+&;lIeG2)`RkO&=9J4%WE+?-bt?2)aB3Y5@>=YrWwNn`83$O zLVnXl3qwO3tlS{HPGTBHWFaV%QKbq7>^4bbnDtSQ)gDa6ntBM&K5%{0F?nu{Hwk%j zhYf_VV{-U%xxKVd0%^N|p)f}E(mz2wEj*BPZ`%bmir{35e#VVhw0!$UimMz)O4(5Y zRgdm2CS&V`cY&E^jk@7Y7-dzqxv6`Gs+ciGQn zZP2vQ4rlKc#3Dp>1WvBnxEAf!f9Wh5{oIbeQBG%%)&;&1obaAWDuXdkQ~kDzn{$e` zXc0=*Yd8Q&@_WW;z0pMSW1}NQuU4jeAroC*zMXL450O?guSRerC(6}C(aN)Y=}F~R z>Nu4OPk|^rDR9AI{GW2P&=Q6tK|}B2oYHPz&gyF%%A^%T+&64Gz~N#(MS(O8^J9|^ z%uViXYewTR=Qkxi0%}jQcqR$~Rn11lBk75UZgPgN#d2g=KccZ24!&-ajXPTiW8rR# zk^d&nHkPd}UuMgk*X?iXGAmXTjxfra95qM@viQxF{3NBbTfFq97n!(q_XuE;gr22f z(Fc{$g%e{rTy@#cu~NRuK56XeH`c8}#e$aYC|ldD?}EZLUDEKcQ~AX9$7VG(FCkwP zzMEDcsfS%f(0T&8B1=z1r_?vdbgC}g00GQnD~bNuR8Nz)BKT#L70RSCwg-+lV_Mi_ z#ua4AGAj&egV!U$56&I+6EDn$Q$jy_o}6e0V?c5fY6P|$+Fr?+3g10(;P>A1QWd+) z#^XKHJzF*!*^}#6D@cs9XlN=_pn;N1&Ui-x(~X^q{O@VQtQ3?Vd|7(zo9Bc?qWzdK zUBV6Ha550hPO;aQyzqg#R7Ocaj0uSyQ^w)a(V{xvweoe3QCPYB z*sGz#NbcFBz&G$En_|`e?yJ4HiU!8r>~?GnzGxGChCxqcNEigt*!yq}5@F?14fT;a zk;^KJUZU+Z5ACGaFKckspVoko{fv?Jw_{p9_6p~CR3%R*+wqE9DlXOqMV~wNkujB+ zMA_Mxe&we6VOZ`*_rz5{<_CthrO&(<_{hS%$+aHN@`2RGCC%Rv^3b{K42E1e$@{`n(p0@;O2E&nn-I%-pC+&9h!;4Rl1Fo0T&o`I^-r z_bl5~YNdD3nso9OTTP+S;-8IZEw#4?jerMVUGU2^(6W9H3IX6sJuSgZL`P*cI&IA?pNevtJwDMFQzTR$27$EH1$MM; ziCg10?~uy{`u4{yw+C4}m(we&lywtg8*PC(9kuu~ zks-NgHy9Lcrr(l9u1kN(wenCOBRLIu!bMyh)>x2t94tk_Q(JXn3V-Ss`VkDp$NGmi z{{NkF(I|9mQW%;gD}gshKKAhS)brKyniw-;4e+4Tpun3$cGuD@|; zTC-;cFEf;<5X||l``cHKBCj*|6P)l0`UOt+2~XRWa6P~}zQZs0t^K(LXy1obkJ|`7 zmI5zeu>tvD?Q%TVl&2s1_9(&%Ot!x`^E}&hJROzfQ5{eb$_{_C>p!pF6Y85mHGG1X z>f-tb(-Q_GU{s7TQn&r|b1B!fR64?x*9YJfHr`-Sd_n;q=VTU(v~3_*XYXfR%M-WE zU>e!3mhvQnrcO~S8pRwuhny-HvG&?AzxbEiw?yWhiuavyBI1|cXWn8)uKv&Z~ zV)p#{sMU85b9m31H1j5Cd@YO2JSZ8=@NE3QsuPGhBx({q4vbBxF`lyQTQi6V!5(dJ z%Mka*k%(^&htSOthiWaH8~&+<1r>@8F4ggaTo~b{#D3;K^xk&Yz(qpp7+PD%wdqnq z-(jyH3eGXZq6lbz<_i_ye5I7GA$bS(F%`XB%5eFJml6s9ID`&{lkH^WZMI%xyL5T=nx0u%B^YI zCuGx2i@W$RZW5~_Krwo!y|p4GE`&j~d>^u)Mn^MoT^cr$db!*mcwFOAsa7?gbJ|el z+9yiD-43*pWz{1EE{5Q|huo&b=3FBTQSGbZ9^&A3uQvbJ8f90Y#dM$C>rJ5ty0ynr 
z@d&Q&XEw!3MIZ-?l2c-SzHG1^J7T0tMO(RyTJe2y3~XhE)WNL`n2;7$t~86pwcv=~x{}2ab_!ML+Ytl6h4Pi$Uapjp!keS+2@O-?xeDu5;&F`EAcCNROgtgl}OOg3^<0KAzE%^ zld``uD%it}ilU~va<9XoaT42*I&31>FJiCbI=ymzn6p2@Pr=_qg=_N@J_X*+XUxi3 zs!)8yoK!?o*?7`wJI?Z46NP2zI2pLF3jN+VA#V3?A^zsp;2+2W!y4bH$bqhr{3LX* zYqoUhs{$uYDh*0Q`piz&7xupm$(T~!ECe}SvF4gtOAl4Kr#PVDJ}wv7OB=7Ok8R|c zoBhJo8_QV&WHEpj)d1*n?oRjW8Yu^NHve%l(Hy68Kieax^)V)QD8M5!KwgoPSOtpe zDyT0{@FF29srhv~JAhWz5Vck=F&VC$`z)~QgA=XFT#qyxJ+wJYK#AH*vcI@xP>$p- zgT{Jm0C-_rJVz;Y{X8^a&4y+5xLXW?!vb5!`$}h;)dFc>!)NqYrI%zrcv;V7_JqYr zYQd%^FF$-rm-8^z;?sM9jgoFy98v*ynDN6E4OiEC9X|K+GS(_AcKrm_W>9+6o!i6f zR5F;#n(Cw5HMF~VDya5(pugCvy@mZ%-wxYy6bdm<+lAi&78axL-n?80;C9A;zxFaU zoj%Wk>`fy2d#F{7PcWQ|96etiyVO$N{FID+(RN%S;wz3*zX&--8KM`web2J= zBMRjwutdYMIi$Mgt3Ey0%C|@x-FbWq7fAD6ZUyOZT?$8sQuubiijJiO#Do6i6MhHu#LMc47M6mJlLIV$e)wG`og z26*qk89lO`Tva}y5T%uzqne{y^}1X=(CwiUbR?04IR8qyEioX*?;KXh|Gw46aTQDa&mkReeV$LZajU0OJBsSY>qt@pR~nKCHBc4IxIBJ0%l66& zd{Yby2{K~d6fdUczY)H#K&}c!xN1f3Mi_Dz;_z=&RpRzhemUQryEa8?D~?ow(Xw9E z_f`HEru4UBpC(dcy(d~^x8B}XWV-!0A(6ll#D42J-^9$@6;2^W-%Uj$(qbTng`9Fk zFvJ9o=`*8eT*%#jn-nHtv;$*&>-!jPe`a07E3ScE$)RSrRW2U(KK^+Wrc(8Ab8EFJ!kNFAe{hjEHLF>VVOn3D!Np zWNv-LB%uEix{K?N5#DYo)<^cC^dP^f)jV^d$6CeCk8f|b-g zD)?k1wx|4e%!YUcOo_Ms5gukwtc-e~^rj;|eb^s~v}*4zWb&TW$cUf8HvVunk5s~= ztw{QEYR&ehw?iF@FPcR%v1j2_Mh~A$8{bYDo42#}j5XJ@lc=9izw+ShTTK2bY^Nmk z#iQM0@DUCoZN|4FPwm3H!9Y|iS%At6>rUR8U@~yyacriPcT~ZKnt;?Hk$6nrk>63% zSIKO`2()srxhn;!`@t9z;v)AHAsDl>e^dne{E7Mpi;X_L&%t=G*@@jnedfDBYPCjt zxae8^uI{+tEAz|jO~@q+p%t8&Vs)+h)+rpDHjv26WgKYH71hvMl~DOOn;iLlK$jgUV2#vX_DKm@#~> zohbDDsRukN~OeX z{(!YDo|dJuyxu}Z_%f4HL`b(yQnOQdAPiAdDDz|M=jj%3A=GURru|mZqnw$HW(@F| znT1#%D*P3ZG~AZwK`G~MERTeT+d>q#jK2?Xjdf$b*+;u8#Vb1owdLtM&DpwzI1gHr zS(UzE{pj0Cn#h&VdpjMZ+{L3F%CjH7_5B;%?^dtq7@Ln**=OaET z>QeuZ@=4Rb@+Pd71Es65T1Y0Zgbhlm2hfeMW(DYP`^CM&ABeceSe5JG`|KP7#k~1ruS@wWehjf9i{#@Vq+u+Zg(3mb z%YtlR)_)2fh)R$ei_*_qRlgj&k;=0gVZ%FVUT}o3eg#$l3xbLMc_#n?*cezWn^q~a uJd}-fxwW7H*w&c=YBk6}Udc*44EFeYeHJSl79JV3Sd*hn^}~Ow-hTkP>l@wx literal 7852 zcmb7J=~LY09p7Epg&lS|mm}OqfN(E2EG$O|_mx1n!V(|}SHc1z9LsGR+uE^HHK~p> zt#;a~GisdH*i@agI-`R|6LV;)O^k8IH|?}9eA7RmpAX+BKeoJTXFlKa_w4U^p6|6B z_V3^SOGuDG?_-A&3^JKB!J9yURqv-4jgNdml#iBDnn-&-LZ5o{Xo*3ud!+7Nt+)5> zaT%@l7JJ5Cr^O!Y>yOt4fXtv6(3ZjAud`cB{@!Ocn2kDnoXI?b1x@-fA76c@Z*WW+ z*9$r!JkE1pT}@~Zhba-#PxPorNEfJCZ1B4BR`B4s4b8*-&n`u4Hcd}@*!)o_>T?xP zGmfTIL&T;RG#Ddt`?s$gfCZF^08bUo%uW5j`9(z+WT^OfC7@hc^CH1dgKqphPwcOM zcI)?*>V4B`@Tu)LE%H2LLZo^rj1!=wAscvd6<9ooyYlE7HlZ!-o)T7J&E0o-7d`

*E zk#9Q`%?l4$_JMbdfdS)NT?c8m*UvGxfNi}_6y5T}%g2t34n7+d&~9hsVnLfZZ48Wn zpDz@3a%8S3`*dkrKX(U?f$BjqMli*bsc_5bM18il1Wfg&Wc%zmuFo1&R1ZqUgRHusq9u8tOeTqK2`r!)c@9WJs|jlDeioH~g$la%kbCQ^X|_s3VExIlHR(cN(O!rEdWnUG|VpiIPv?`fU{J&!F2OYAX2 zW5oyhGQ$9;L5hvh6b!Jmwx8s>aTN*{JqxPc4Q2%aIlPjfoq1!LYC(hIi-4FZ20NP9 zO_9tjvFJlsHNc#HBOc=6Q|sX2j2@8dY~VX8#1gIsBufjZtzzJSz9^B*?X;-iO|I(~ zAeI)y(+Q#D$pm?SzjF16bC7{dp3L%;O7K9z_mBQVjV1^V?pC_@<8LQTr$aI zjDvK1C-(+g8VCD591uAmHCk98J`U!gj4@gR0rro6zfl$HIwV%Eh%$%@W`jv?1G$c# zQ&W^L1j0smbYP!OBz@N2RVE{aQC=kzs55DH6SVw;x@c>kWy{RBFM`a)DH+X>E6g<` zt$`675-Yk#WEpoMWs0d-tc9&W~LGq)}54pwUTZmGipIFrWO3qE^U%#M~bP*|C(-S)!}91zUQ z_GNtrgy1ZMo;P;zo5!c!0zLy`hGIdxu`OpG2w04Bpz>^<2sq4Xd=wNX_OMEqkM2Zb z*7hI8=?hWC0IHas-95#bkDr9!XJp3Aa0q7l;taQ+5Pi4lV3!+s&W^+d? z1KT;qf$WWMe$%7c+1o3+O++aK^C946St1l_0n{nb^>^5v3vMvA+|vvlVB$*}!oVv& zeahM{PT{S48w>7yCgdzb1r#v)K<1vFg<|y%+sI zzYm;|SMiEKOz;2v`#=$v4}sXUwuiqu2a$0+ArfNe?1$G+=M};beDMsVJ2|7gYUT81 zJb!2!9xUjDP&biP;A1}giD8&cVB)*>py7+l@q1he-u7wE41gsGu%7_Gd3)C?WK-_w zdS+e}k19BspS1JSj&G_#^-?vn#p+v>6)Y`)6KOh&lVVC7j;v-VjRj(zf>;N&esG@q zsBy%zxOo$w1u!5WmR}S7tz%+7c##FcN*c^w(X-2LT{$j~)V2rW*&GJP#R4b2Afir{?jfe9t{ZGL_~AQ~oDe(->wokB7*N05p)|2GxY{toGX-)h4YQ*8 z^ZKgAB3F3;*Ssyp<$rSL<^d&>d~8Wl9SUOr#ASdgFXLxdIJwF^rgHIv7jY=DcRIw? zCjxv6#I}3zX0F+-E*`ix1kX`bwNQ&eK#YjcPQDG8H9ov78tVxNhyqjTf@WjbgJ$h3 zgF8Ds=U!sA0KJ~~_~b_+|9I}#;xTgWlrHeIJHHWaj-9)G0mkQK_n)2T*Fh}^uywmc z(ReCreKB2Z)7{57VY<`10yq7tp|z{I!$X(3S9r(3Rxmd6QP78f;Pu=VMj}U5BoqAQ z&%oMFI=CitF^ieqzNls=begZSyXw_&s2_%rP@AEmBul(`zpjMq1xyC83|c?YCWiUz zXIDf6XDE2&qzZ9mKpf;nS!U)*RlI?@>?!*=Yw5_VGYgT+_L7K#9= zGwpzxCmQJp+m7Kq^ix=#1qN+xD~ljY5c{@rUYfCE7w3qJ)6yFa?buma5ePRRDQ0Q}BDh78!^luH4*Mk_zP z#FtFoeq)6VW&{C8M!`8RgrKU4Q*=&ojjS{W^cubY(bmX_csa)+l7%fY{LsnSChn!%AWsJOi z@YrEB399D@I&zLSF>;k4K_k=QVl?V@Cn-l7kR1!Asu`M!sEy$PSd9Q<6ep_2+d84L zY{HdTDXd!D_wB1T3i zk#z3l5P0Va@s7@Y(`>HL)!O*LMH1SY^sq_qj&8;N4CzRwIkQ1td^GWBd1rrv}28-y%RdC*r+JVvO-gB^!LF03l~V3m^gGmCmRF z1E7hn@@lad1|ZilB#O0JWDEpfs-cP(@tq9N92`jk-s#?RdS(Jhweo~kUk(Cb6V#}B zm?Lei*PjXMPvA5ISjAx_Q4RO0ax*_)EPX#%OrZr!0AY;jM?-O6K`Thm%=?pYG ziw{@_efuY{#^K$zJF{}=uU9i$;fy^|zq|Ch9~3o%KHxIz}S2Sb`Rn!kQ_@sOC++-qOa zteQgIupV9nV}*VG6XOD_+Xn>H`jnsf_{a0EV6R&{t7P7tr?RF{X! 
zhPd%M7%&pF^EXPDSm2YI#ta$1dLx_{)W8G}41w_*>gGFOX+b+Rd1Rii{rVZO!tweF zhkefAOINp{3AnRfb$29 zM9T;XxZ-qBX1F1=4$=>-2zIUD}<(_4`)eDOa5Ubh+s@=WV z53vT?euMTStJnz(Ff=UpG6EcsfK`NWzMaR;^>(s?ywRD)unV$zA0QL7pB2eZGcyc# z%|u*Eczyg3RHjVjL$y=5gazp8#~v%npB32{$WGmDZ&#Rx2!j%dyi z9b6~c>0~>UgPI$P3VN;PW3>ntg6%;phB(awrLLQmT){&yS|0X6HEX;viU!6;pCxh2z<)(n4 z)U#VuyBc=`n5!a2I1yC50S4r!+Ee!YLB|}PAK`0T^EC5-Mo{>ItUbv)4~cGNPVBYz ze7n`_KduJ7QRmCo^H@NZO{8%1wUs`!$0i& E2icm8*Z=?k diff --git a/testing/web3signer_tests/tls/web3signer/key.p12 b/testing/web3signer_tests/tls/web3signer/key.p12 index f05f5aab570b02ae22d1e550457904c2a11ddcc8..792dc197f86dc4568df492f37bd1ac1ab3808a35 100644 GIT binary patch literal 4371 zcmai&WmFT4+s26zqXtMbknR{=4v-Nd?O>wvNJ@?lVUk0-Mu)VZgHaNrrCUHkN>Iwt zsS*PE`akb`p8v=9!+q{^U)P80OwmcmSHd z{;ymEL6evM*CH=Qlc)ccQb>r2{;t^nI>eBN{~lztkoyqP|DH4uFn}^(W(TVSN~9qn zp(KLPkW>C|8VQh$2*O1Uw28h){EQSpEC!(WHfHUr{Yo8l2IkBa&i51yN0aktnNzp3 zw(MVY{sSNC8TNnG&zZN5t{G#qcEqbR4;eO&zdu6Xac+&{>u!ibC}4MSx|8p|y}{Xo z55gZrnW>@6NjZ3ADHQkhY?q@gg|m-ZM#WWNGk78Q`=VJ74$c*i3f2*2o|@qwGICEq zXzmjpI&65(D$=>yz)9z>=AD1Z~i&vS8thCWaBo{!pE;uy_T^aFoBrN59} zkL;UVf-0##2<;L{lrWmOJHh&Cl!*CMn5dxkp{58dnzMSl$^Q3y^XwsL% z@mOk2v7}#?LXv~XQg7o+0Il6FUva9t4?iF1kBy?madH>L&d`7irwqWPFAlhw_cBG(oI*Sl#NXgnpyY z9TcX!5|)|S<`tr8tF12f%^LR>=bS}bjgecDIAetZRD7Prv9-QJclX0@)GV-zz=Q{X zOjc!-YP&~~_U8&$aX*M8S$2L5#qxc2?y4^`g%f{bmwC&BYpDx|Vw*=O|C#YfPIk0f z>K_6;XJR&LwPClfPiDN5edFI^ThoO!v9oqyD=E5_UmiZcB7IA(=IZ>@%1-2$SF;m< zrx;OI$@oY^C#zqC$+6kgCPq~Okac8;as8y;dIZ!wehOQ>V}`8ZS7rWX978LleZy{h zj7E_!ZR<)S34McAfd-02G%`)nDa^CvS%wXu{dgnLAjv3IQ*b#0GMfi!&d%AR} z%lCwIRfCP!^wRS?!PC~_dF+Z&yR&K@&Nd9f$Xlc2!gT4ywC{4MWr5DpJ@9-*RUv6i zMwmS}Ua^I9yuWe@Y9ne_AZ;-?y;zXM+y2&i&wBC#F>brGDe{S3Cd7yeDo+Pgc1}m`VC=J{d)nf?2w}x^Cd(>%`lg`<4Bb6oQN4G3gy7S*EGVdegR)LEmH&RJ=9u}wA zb?CYX`S2|=!=`lQ$GV27XgPTr$F4b#3R$ZHb^XlLgP-sYy#?6_#fdMJh$9Wd`JrT? z94=j2Po;=F-5;n+4)7Tieesekr5iTU62tC2-wr=9FZQaRw$!e5A84jDzonF_PL)-+ z28|q^9Z@{$uk_7wcI)U3VwyO}gs<7cFy5*!l9K&U&%NXT7wgR9lA25L>z==SdGG2D zs@ueJYqu}*R}_d?o|zWIv=Gps%PB!S5C|uaI?|hn(8=>0iMk#_A`a?>ZXADD1+3LR zPDy_F+pi~kgZGAeZ7z(kuo&lMF;4xcXWjm*Y^+O`F>4e}(u0R)aqu_$J2UCvGOsKj zf?sog(1OvlAhfyQdxz5S%@l>+6**k`c)*(}qAqA&f}s3T(0c{DkkeaffMQrSG#-4h zSUxe3#uCzS|AcXjuw(WfHt=xdPm6Fkd64`G+7Z|MV(|0xb($gL} zeD|F8xgSy_X=M$_+=TAP$1VC{%9uLNP$CrT%k4VQ+_4C{Lb_Ry!EL%Uj>e}i)KtSN zqMk@o`uR-SaEN#t(G=>>7kYEuQI>+@y&zZHWBPA8=B|Jn39<6^?LRlUS);|kSLEV{ zA)~vFz8%$62F|eAaB1=BO$XGqSG@(p*U9orZvi%bZ56BU5C-wnX8l&>;Z;Nb4r3Ku zc4yvcU0qeGUQTL#kdZ(iewH>W{6)^=SUwnKUfC3dl+Pn|HwHFND(}fpN88_tJ6G<& zlia8x=(eaQk2@2=xhrXb6KYP_-OBp|-I>D$ykFLeUsg882Q4G2ZVtmX^eB<~>bQ(2 z2QMelaN=7dMx_MxkBgTCk=x*g78Z?RiMi-W|y{{S@G-Me`{$F5$ zGQyM{MMv7sjJ&x+~d% zGyCj15_~5_+>n&LV@NrlNcR4to;M7oI&Ixqt?WF#q0ugU=~-Cu)0R)T>WByTP!7^m zVPx7MA0)UwU;Cxk*^%Z*YFsE<`12_O6Gnkb_xMRL74G7k2~kmBhSK(=k5Wkp$l!0L9e6kMFdzK%Jmqyf0zi2 zJN!eKI2eTP>SQVTUFojMkgeVS@qU)X^?|uk)~qUB*1_J`;q&@G`&tYo-3Tvj3~Ksk z*Ug2x2Pi|n!H*a*@N0B6H3NM!st*ii;h5)<2*efk&qBTwhYIXouDHO^pQlj__(ZaJ z?yP*{1{W)345y${QrqI1LUJa24m74!d@0iTgxHGt?Np&%@BS|nX0IbSQBS_gi#rkW zA3Z|mJx}KZ>mYAPJ)X~VefU-&!0WZt$IO9L7KRKxOD4BW@D_=dt5*L=jQ(mMg3RZm zi{!l#V6SuY=&aarI%i>*kS){qrHdtdgaFk4UOl6dc<#y*v-<~Qg{+vE?rgRsuIHme z|Eb9>TOL=G&H)eQP4~U1Im_@G!Cq6vTkE0b__030V-*+Ny|{Nom zDn8X;=hc}0?`Nh54l7Fgy1BmMzdZM6;(rV~Bltk>{0v=|+n!tB3c}8;^ip?O=9pjD zC{Ug3_aM1eGRwyLUIeJ;4rlXKbXe_zL26{(6Vlvb)YeHQv$cyoqWGpRcu{F&I83j7-r_k=Ti5wHc18%`yWln`;T*O#_ zN`8FadnB5Q0l#>ZfGoA<* zGY0=EI^}W0taMnbXVh7SjoC0no=0GFgvu`&Qau`3m_N52`4s^GseVLle&6l;sm27x zQhlPeKqAzi5>|y&1Q3+>EL6^&P=Rsb@gytl?iydYt!IBaH9;i>nGZ~9g2zL-nu6}? 
z4^|EGdq3KnApv9TxUfpYLWcgz#XJNH=(lQR+Lxpn5?kW9MXP-lR{jHnQmG!l=}`Mx z-Ln=I>!*wnm#PvIX)DX+;k}dew|Sk34V-W<&wV)qHcgo!8I6iwrDVt=e8Ha>k9a;E zg;th;wM5a}=n+HYm12r`p2(-=FOMjIjMi6bA$8+?+R#}HWIAA@u=H1G8c|D>Qd~0K z6phXaR0GAHvF?etGzGI+^{dj-!`ZCpwv3LJhV1+K6&tT4139bCUUp3YIpQ*{(dn~zPet#7XYlXh3kVYc^_t)9^wM=|i0a%pYX zZg*cqvzR9a>zT>cEFdI?-+O(KTXF&jHI-WzE1O83x|!X`ADX zPvfF23aj7gbeID*b1})kIqIapPp5EUZ{quTwi&c1focPAio#M}V@lDEu47KZ_WTYD zJV^>d<#6CBQvFcJ^ZWgT#criBgpGm#`n9!R{y00Wr@x(`D|I;=bs2NCMK)eg9`87G zWQ$ujVSAD4=5qPnK}jJ%IdSLRX#STde&4a{d7rP9=Dt6CYYn%&dl{vf5(M2deLpaJ zX(hh)bqjTbddLklh|gccHD@fvu2E4)9=6ZVWF}^8vQdyjoS(+yS z-hxrHOjFY}c%bm12MoaXt9x!e^7JNG<)pUK@E|JF=|kc)gYOOPZ$WvnAmTU;9Xk!` zSmY*>tphIb(zor6?d9H%R?KqC<{)Y`bYT_1+j+sGAXk)}Rw}WmOk4GSO$4!{`Sa-x zJehoK_CPTo;-aB#cAke1OGObVb=cfAPe9!fzZY9lVYFSmC-JGN;u01M6HrIOy688vS% zM9R)==q(vr5eJ<)ml2xSHVM1#&fji_w!CuA7NWDksYb>GUWnUVz+%$UtcSeJ79* zlMw-U){3&tr(1eRxAK;84*AW`D{Z!CD+VSI*D*2V1J2k@0Oh)S8TT2fquPI}-v0n$ CpCCy9 literal 7727 zcmb7J`BT*A8E2PWU1(8*aZYxWI;hdq5;h?ab#n-sgS4-|zE2*Aip- z*T)|Q1!^_k7AR{$K88&2tsuat@zsdNEggvR!BI*d(nF8XrnVj&(Q3RNs1KjkSav*Q z(i<(?EEx|Q11vE*zgc;8RUl-5R_o_w3Gne-?XcFM_p-$L7X$MdQI=WBOUZekY%mBeJ0hpEdxuQ2Y3pw zgC;hCefq2z7>YPhf&(n?i=!Dd04jy*X-ghVh7>Tq5{Qjf=VU3ow*xY`#acTDxo0g{ zzHA7$Z7A8k1T`I$-2k>3q(bHu20hIS4A`+Ly%K`q0%l61*F#k3D*Jsxp)_51A!=tX z6m_M1-<&9~jv5$%Sz{l#3ZQdeauVI_DQcHpa7*8GPg7hPV^u9ierg`XS60kK58OKq zr3wTTe{ptPG~x68kTC&kz^UDOr96I9@SyD3_|^NTEnNxg#>A2e!8ZfG&0yJ#56}iu zaU9`m%2lw3X>4R7a;Px(>T&wtT}uP71LPl0w;5+3s(y|NcPLQ9+ zq)MlV*&ksI*V^kjCvF}BW6K*sK*mfpIQ@$W(7M*DvLmyJ+5LP0UA=e$ z%+{092Vrgp1Ma>AmMA)-zM{EA?JRAWg%)b1@9cVjm0TvO58vZv(E{V3MVrxe_gz`5 zU}3?LJ3fE@Q3ez+Efz*Xu&6g@)CBPZz!D4i>&Iomd^R!mk=NgFG7dnOHH)9%g!2aV z#yN_!AqDMf;9^z89NP=P44`ySE~r#_$?I2-r0(O_!q{ll>#u(dVKhdsGz#Oj4m4Li z-BPx=EAh%3fj!K5);GV(a@0dfw&|Cmn6-*Mxn2{_?PVt67<9lqcd zARrI41X?dkbwB0vd^%6wJ6C0cxc(^Xi4<5DPZ0&$v#_y51$UR(Z<-eFJ5TL+oRko&Ob++rPK`2ohd;mSLYn7IQCusE2R zc82kD2GErfk-WvqnQjTRmD4!5a*$1`I+#qkU^pA=JXBn|bYRz$C>`i+ngZo z8g&o-A^qSldIL5&M<{u4we<0)=UYL5Q4?U)&^=)Fq5tRa_VX{w^X2)$+W+~_b~f6< z;-QRdA3$kv#a?530?S@Eiw?e2%zK;N2@4L7Edv_EB=BFUqgq%l{LT%Y3~~%=W`?!7 zEA7GNH>L5(G;?KP>q@J01lEOcl-b+BE`M0Ycc_WvJu^=uu$QG1ExPcSQg>FpH|GYx zKJm)m)+sBtRF$a0Bl+931R6>5nAu`qo|NVGt@8tB<3atFzgTCzGaWOP=gWkEVtWn!agt$>%Hp@#eTF>4B z7Fk7t9tZmiUTA}qimcRWxA&|E8V~Pgy^+~#qcPo3EBVUtL3{_mr^{ZM^wAPu>2zfNsm>N zHbNcgVl1Og%5CUCp$LYO&ak_IbkHtMR~W?2m;1y9-W0YRb|wa}E|h}P{c$(g1PCyJ z7<%ts zHp{nZmg?864 zjcnI_i`U4RvoLD{qt)!4)K68AA z3xLlLHiHd-Hh`FhsVp#7>FvkCfMI)5pfj2Nu@Jcf@KRkhqK89-E%X$wOeRO{{pVy4 zd|{R~d-ZuW9vSGEz>Rv@H^*iAcSBXrwrwt^<~;>w^<_KO=70h2$`o*4Syaas@(Na2 z4X|JgF0cmywW|rVlg2ESfFk8Us63VHgse0$jv4Ex?yAe?RLtp|X6Ao=W&$fyP@#$? zwP9`u7W&AB1ZG2cSm5?(?STcmRsMU+VNJdINOb-Ar}_i+%X!AoIhwo<-OC+*b#R+YsZ7d$Z6;* z4mPMfzL?p_Vi6?J`TKw9X6AqnhOkF@D<<$%(P$nAg@=9n0R&&IV+BvMthwq3F=8p# zkMj@ViokULFQ0&tYE7~GZC(D)!@ct>SJU1<=VZ*{y1@X(!j@xVCpQNyf-l8_9oSrZ z`EAH#7k}{pExqsK7GsuLKT{0B@z!nwld>w$hRPTLti$ki$>4P$CV;*!(OI?uVC~)$ zELutSXHP@B_qr^>*TyFfYwGBlU@CeRN^=;8Hc-Stu4hlK2BVZzRR|u=(;1$02S`tblnFSmq(G zK7Rjf%;#OU7oxvbS; zEZ72b9b#S0Z~R(T>H>6_HDG7?+&sqVBp3-!R^OOwW|~}}MOL;~m0v0Zvk7n#bcbe_RJ&?R_e#vfd0pq?rtG=fD<0z>Q~E z*9o#%;(LFL7;mwCbxdS&*Icr>kzwyCOp#SLvkycXCuaIgqyA7M$d~}- zu8Aoc0s*}unj#jUjIjuTfenST#iD-l%kK^+v$h2L{&b>^D-xTB#TaZ31vfF-G>1*9 z*iVfu4cW#EF1?opE6~d5bnwTJ73WBZ03TF+(gVE&1h6hTL}df>W&DyCqa}!AEi&6Y z3ivH{FfQ?|CPoP`(J^Z}eG>2kJYJ$Q$1zbLTPkq$Zq~&C2Jl9=PJtPi_VG$x62ys{iq+)#$x+3E#i+eA_K1 omc#1aNmZNt{ieTn-U0(u6+>g{)NOTs(D}VMkoV9={=3b80RM6v(f|Me