diff --git a/.changelog/unreleased/bug-fixes/3358-dup-write-ack.md b/.changelog/unreleased/bug-fixes/3358-dup-write-ack.md new file mode 100644 index 0000000000..50d973fdd7 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3358-dup-write-ack.md @@ -0,0 +1,3 @@ +- Fix acknowledgement packet not being relayed + due to duplicate `write_acknowledgement` events + ([\#3358](https://github.com/informalsystems/hermes/issues/3358)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/3359-dup-send-packet.md b/.changelog/unreleased/bug-fixes/3359-dup-send-packet.md new file mode 100644 index 0000000000..eadee65314 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/3359-dup-send-packet.md @@ -0,0 +1,2 @@ +- Fix bug where chain proof verification would fail due to duplicate `send_packet` + events ([\#3359](https://github.com/informalsystems/hermes/issues/3359)) \ No newline at end of file diff --git a/crates/relayer-cli/src/commands/clear.rs b/crates/relayer-cli/src/commands/clear.rs index 0a8ba235c1..42149e3e65 100644 --- a/crates/relayer-cli/src/commands/clear.rs +++ b/crates/relayer-cli/src/commands/clear.rs @@ -12,7 +12,6 @@ use ibc_relayer_types::events::IbcEvent; use crate::application::app_config; use crate::cli_utils::spawn_chain_counterparty; use crate::conclude::Output; -use crate::error::Error; /// `clear` subcommands #[derive(Command, Debug, Parser, Runnable)] @@ -115,11 +114,13 @@ impl Runnable for ClearPacketsCmd { src_port_id: self.port_id.clone(), src_channel_id: self.channel_id.clone(), }; + let fwd_link = match Link::new_from_opts(chains.src.clone(), chains.dst, opts, false, false) { Ok(link) => link, Err(e) => Output::error(e).exit(), }; + let rev_link = match fwd_link.reverse(false, false) { Ok(link) => link, Err(e) => Output::error(e).exit(), @@ -127,28 +128,32 @@ impl Runnable for ClearPacketsCmd { // Schedule RecvPacket messages for pending packets in both directions. 
// This may produce pending acks which will be processed in the next phase. - run_and_collect_events(&mut ev_list, || { + run_and_collect_events("forward recv and timeout", &mut ev_list, || { fwd_link.relay_recv_packet_and_timeout_messages() }); - run_and_collect_events(&mut ev_list, || { + run_and_collect_events("reverse recv and timeout", &mut ev_list, || { rev_link.relay_recv_packet_and_timeout_messages() }); // Schedule AckPacket messages in both directions. - run_and_collect_events(&mut ev_list, || fwd_link.relay_ack_packet_messages()); - run_and_collect_events(&mut ev_list, || rev_link.relay_ack_packet_messages()); + run_and_collect_events("forward ack", &mut ev_list, || { + fwd_link.relay_ack_packet_messages() + }); + run_and_collect_events("reverse ack", &mut ev_list, || { + rev_link.relay_ack_packet_messages() + }); Output::success(ev_list).exit() } } -fn run_and_collect_events(ev_list: &mut Vec, f: F) +fn run_and_collect_events(desc: &str, ev_list: &mut Vec, f: F) where F: FnOnce() -> Result, LinkError>, { match f() { Ok(mut ev) => ev_list.append(&mut ev), - Err(e) => Output::error(Error::link(e)).exit(), + Err(e) => tracing::error!("Failed to relay {desc} packets: {e:?}"), }; } diff --git a/crates/relayer/src/chain/cosmos.rs b/crates/relayer/src/chain/cosmos.rs index 6cb4b1fe10..b70ca6f465 100644 --- a/crates/relayer/src/chain/cosmos.rs +++ b/crates/relayer/src/chain/cosmos.rs @@ -749,7 +749,7 @@ impl CosmosSdkChain { &mut response .begin_block_events .unwrap_or_default() - .into_iter() + .iter() .filter_map(|ev| filter_matching_event(ev, request, seqs)) .map(|ev| IbcEventWithHeight::new(ev, response_height)) .collect(), @@ -759,7 +759,7 @@ impl CosmosSdkChain { &mut response .end_block_events .unwrap_or_default() - .into_iter() + .iter() .filter_map(|ev| filter_matching_event(ev, request, seqs)) .map(|ev| IbcEventWithHeight::new(ev, response_height)) .collect(), diff --git a/crates/relayer/src/chain/cosmos/query/tx.rs 
b/crates/relayer/src/chain/cosmos/query/tx.rs index 15d6efe7d2..92f2f49e34 100644 --- a/crates/relayer/src/chain/cosmos/query/tx.rs +++ b/crates/relayer/src/chain/cosmos/query/tx.rs @@ -7,6 +7,7 @@ use tendermint::abci::Event; use tendermint::Hash as TxHash; use tendermint_rpc::endpoint::tx::Response as TxResponse; use tendermint_rpc::{Client, HttpClient, Order, Url}; +use tracing::warn; use crate::chain::cosmos::query::{header_query, packet_query, tx_hash_query}; use crate::chain::cosmos::types::events; @@ -96,6 +97,7 @@ pub async fn query_txs( } /// This function queries transactions for packet events matching certain criteria. +/// /// It returns at most one packet event for each sequence specified in the request. /// Note - there is no way to format the packet query such that it asks for Tx-es with either /// sequence (the query conditions can only be AND-ed). @@ -121,34 +123,46 @@ pub async fn query_packets_from_txs( let mut result: Vec = vec![]; for seq in &request.sequences { - // query first (and only) Tx that includes the event specified in the query request - let mut response = rpc_client - .tx_search( - packet_query(request, *seq), - false, - 1, - 1, // get only the first Tx matching the query - Order::Ascending, - ) + // Query the latest 10 txs which include the event specified in the query request + let response = rpc_client + .tx_search(packet_query(request, *seq), false, 1, 10, Order::Descending) .await .map_err(|e| Error::rpc(rpc_address.clone(), e))?; - debug_assert!( - response.txs.len() <= 1, - "packet_from_tx_search_response: unexpected number of txs" - ); - if response.txs.is_empty() { continue; } - let tx = response.txs.remove(0); - let event = packet_from_tx_search_response(chain_id, request, *seq, tx)?; + let mut tx_events = vec![]; - if let Some(event) = event { - result.push(event); + // Process each tx in descending order + for tx in response.txs { + // Check if the tx contains an event which matches the query + if let Some(event) = 
packet_from_tx_search_response(chain_id, request, *seq, &tx)? { + // We found the event + tx_events.push((event, tx.hash, tx.height)); + } } + + // If no event was found for this sequence, continue to the next sequence + if tx_events.is_empty() { + continue; + } + + // If more than one event was found for this sequence, log a warning + if tx_events.len() > 1 { + warn!("more than one packet event found for sequence {seq}, this should not happen",); + + for (event, hash, height) in &tx_events { + warn!("seq: {seq}, tx hash: {hash}, tx height: {height}, event: {event}",); + } + } + + // In either case, use the first (latest) event found for this sequence + let (first_event, _, _) = tx_events.remove(0); + result.push(first_event); } + Ok(result) } @@ -193,9 +207,9 @@ pub async fn query_packets_from_block( tx_events.append( &mut tx .events - .into_iter() - .filter_map(|e| filter_matching_event(e, request, &request.sequences)) - .map(|e| IbcEventWithHeight::new(e, height)) + .iter() + .filter_map(|ev| filter_matching_event(ev, request, &request.sequences)) + .map(|ev| IbcEventWithHeight::new(ev, height)) .collect(), ) } @@ -205,7 +219,7 @@ pub async fn query_packets_from_block( &mut block_results .begin_block_events .unwrap_or_default() - .into_iter() + .iter() .filter_map(|ev| filter_matching_event(ev, request, &request.sequences)) .map(|ev| IbcEventWithHeight::new(ev, height)) .collect(), @@ -215,7 +229,7 @@ pub async fn query_packets_from_block( &mut block_results .end_block_events .unwrap_or_default() - .into_iter() + .iter() .filter_map(|ev| filter_matching_event(ev, request, &request.sequences)) .map(|ev| IbcEventWithHeight::new(ev, height)) .collect(), @@ -278,7 +292,7 @@ fn packet_from_tx_search_response( chain_id: &ChainId, request: &QueryPacketEventDataRequest, seq: Sequence, - response: TxResponse, + response: &TxResponse, ) -> Result, Error> { let height = ICSHeight::new(chain_id.version(), u64::from(response.height)) .map_err(|_| 
Error::invalid_height_no_source())?; @@ -292,7 +306,7 @@ fn packet_from_tx_search_response( Ok(response .tx_result .events - .into_iter() + .iter() .find_map(|ev| filter_matching_event(ev, request, &[seq])) .map(|ibc_event| IbcEventWithHeight::new(ibc_event, height))) } @@ -301,7 +315,7 @@ fn packet_from_tx_search_response( /// is consistent with the request parameters. /// Returns `None` otherwise. pub fn filter_matching_event( - event: Event, + event: &Event, request: &QueryPacketEventDataRequest, seqs: &[Sequence], ) -> Option { @@ -321,7 +335,8 @@ pub fn filter_matching_event( return None; } - let ibc_event = ibc_event_try_from_abci_event(&event).ok()?; + let ibc_event = ibc_event_try_from_abci_event(event).ok()?; + match ibc_event { IbcEvent::SendPacket(ref send_ev) if matches_packet(request, seqs.to_vec(), &send_ev.packet) => diff --git a/crates/relayer/src/link/relay_path.rs b/crates/relayer/src/link/relay_path.rs index 63d925737e..87c346e444 100644 --- a/crates/relayer/src/link/relay_path.rs +++ b/crates/relayer/src/link/relay_path.rs @@ -419,11 +419,10 @@ impl RelayPath { telemetry!(received_event_batch, tracking_id); for i in 1..=MAX_RETRIES { - let cleared = self - .schedule_recv_packet_and_timeout_msgs(height, tracking_id) - .and_then(|_| self.schedule_packet_ack_msgs(height, tracking_id)); + let cleared_recv = self.schedule_recv_packet_and_timeout_msgs(height, tracking_id); + let cleared_ack = self.schedule_packet_ack_msgs(height, tracking_id); - match cleared { + match cleared_recv.and(cleared_ack) { Ok(()) => return Ok(()), Err(e) => error!( "failed to clear packets, retry {}/{}: {}",