From ae4752040ccc19c8741685728071ab0d90a1c741 Mon Sep 17 00:00:00 2001
From: Vincenzo Palazzo
Date: Tue, 3 May 2022 19:03:01 +0200
Subject: [PATCH] send warning when we receive an old commitment transaction

During `channel_reestablish`, we now send a warning message when we
receive an old commitment transaction from the peer, rather than
closing the channel.

In addition, this commit updates the functional tests to make sure
that the receiver generates warning messages.

Signed-off-by: Vincenzo Palazzo
---
 lightning/src/ln/channel.rs          |  9 ++++
 lightning/src/ln/functional_tests.rs | 80 ++++++++++++++++++++++------
 2 files changed, 74 insertions(+), 15 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 1cb7a689a21..89112b02db9 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -3737,6 +3737,15 @@ impl Channel {
 		}
 	}
 
+		// Before changing the state of the channel, check whether the peer is sending a very
+		// old commitment transaction number; if so, return a warning instead of a hard error.
+		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
+		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+			return Err(
+				ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+			);
+		}
+
 		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
 		// remaining cases either succeed or ErrorMessage-fail).
 		self.channel_state &= !(ChannelState::PeerDisconnected as u32);
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 4defbaaa293..23952b7c53a 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -7304,8 +7304,20 @@ fn test_user_configurable_csv_delay() {
 	} else { assert!(false); }
 }
 
-#[test]
-fn test_data_loss_protect() {
+/// Describe the test case as an enum instead of a boolean: the enum
+/// makes it possible to pass more data and to check more corner
+/// cases.
+#[derive(Debug)]
+enum DataLossProtectTestCase {
+	/// The node that received the warning message tries to use the
+	/// channel, but it cannot, because receiving the warning message
+	/// does not change the channel state.
+	UseChannel,
+	/// Try to reconnect to the node that sent the warning message.
+	TryToReconnect,
+}
+
+fn do_test_data_loss_protect(case: DataLossProtectTestCase) {
 	// We want to be sure that :
 	// * we don't broadcast our Local Commitment Tx in case of fallen behind
 	//   (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
@@ -7402,22 +7414,60 @@ fn test_data_loss_protect() {
 	}
 
 	// Check we close channel detecting A is fallen-behind
+	// Check that we sent the warning message when we detected that A has fallen behind,
+	// and that we give A the possibility to recover from the error.
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
-	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
-	check_added_monitors!(nodes[1], 1);
+	let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+	assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
 
 	// Check A is able to claim to_remote output
-	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-	assert_eq!(node_txn.len(), 1);
-	check_spends!(node_txn[0], chan.3);
-	assert_eq!(node_txn[0].output.len(), 2);
-	mine_transaction(&nodes[0], &node_txn[0]);
-	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
-	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
-	assert_eq!(spend_txn.len(), 1);
-	check_spends!(spend_txn[0], node_txn[0]);
+	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	// Node B should not have broadcast a transaction to force-close the channel!
+	assert!(node_txn.is_empty());
+	// A should now detect that it has fallen behind and close the channel, without
+	// broadcasting its stale local commitment transaction.
+	let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+	// After the warning message sent by B, A should not be able to use
+	// the channel, nor reconnect to it successfully.
+	match case {
+		DataLossProtectTestCase::UseChannel => {
+			assert!(nodes[0].node.list_usable_channels().is_empty());
+		},
+		DataLossProtectTestCase::TryToReconnect => {
+			nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+			nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+			let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+			nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+			let mut err_msgs_0 = Vec::with_capacity(1);
+			for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+				if let MessageSendEvent::HandleError { ref action, .. } = msg {
+					match action {
+						&ErrorAction::SendErrorMessage { ref msg } => {
+							assert_eq!(msg.data, "Failed to find corresponding channel");
+							err_msgs_0.push(msg.clone());
+						},
+						_ => panic!("Unexpected event!"),
+					}
+				} else {
+					panic!("Unexpected event!");
+				}
+			}
+			assert_eq!(err_msgs_0.len(), 1);
+			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+			assert!(nodes[1].node.list_usable_channels().is_empty());
+			check_added_monitors!(nodes[1], 1);
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+			check_closed_broadcast!(nodes[1], false);
+		},
+	}
+}
+
+#[test]
+fn test_data_loss_protect() {
+	do_test_data_loss_protect(DataLossProtectTestCase::UseChannel);
+	do_test_data_loss_protect(DataLossProtectTestCase::TryToReconnect);
 }
 
 #[test]
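
Note on the arithmetic behind the new check in channel.rs: LDK's internal
commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER,
while the `channel_reestablish` fields count up, so the patch converts its
own countdown into a count-up index before comparing. The sketch below
restates that comparison as a free function; the constant's value and the
variable names follow the patched code, but the function and the test are
illustrative only, not part of the LDK API:

	// Commitment numbers are 48-bit and count down, one per commitment update.
	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

	/// Returns true when the peer's view of our commitment chain is more than
	/// one update behind where we actually are, i.e. the reestablish is stale
	/// and deserves a `ChannelError::Warn` rather than normal processing.
	fn is_stale_reestablish(
		cur_holder_commitment_transaction_number: u64, // counts down from INITIAL_COMMITMENT_NUMBER
		next_remote_commitment_number: u64,            // counts up, taken from the peer's message
	) -> bool {
		// Convert our countdown into the number of commitment updates completed so far.
		let our_commitment_transaction =
			INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number - 1;
		// Even granting the peer the one commitment that may be in flight,
		// its number is still below ours, so it is reestablishing from an old state.
		next_remote_commitment_number + 1 < our_commitment_transaction
	}

	#[test]
	fn warns_only_when_peer_is_well_behind() {
		// After some updates our countdown sits 5 below the initial value,
		// which converts to a count-up index of 4.
		let ours = INITIAL_COMMITMENT_NUMBER - 5;
		assert!(is_stale_reestablish(ours, 2));  // 2 + 1 < 4: very old, warn
		assert!(!is_stale_reestablish(ours, 3)); // 3 + 1 == 4: acceptable
	}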