send warning when we receive an old commitment transaction
During a `channel_reestablish` we now send a warning message when we receive an old commitment transaction from the peer.

In addition, this commit updates the functional tests to make sure that the receiver generates warning messages.

Signed-off-by: Vincenzo Palazzo <vincenzopalazzodev@gmail.com>
vincenzopalazzo committed May 3, 2022
1 parent 171dfee commit 6ecb4d6
Showing 2 changed files with 56 additions and 15 deletions.
9 changes: 9 additions & 0 deletions lightning/src/ln/channel.rs
@@ -3737,6 +3737,15 @@ impl<Signer: Sign> Channel<Signer> {
}
}

// Before we change the state of the channel, we check if the peer is sending a very old
// commitment transaction number; if so, we send a warning message.
let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
return Err(
ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
);
}

// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
// remaining cases either succeed or ErrorMessage-fail).
self.channel_state &= !(ChannelState::PeerDisconnected as u32);
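
For context on the arithmetic above: in LDK, holder commitment numbers count down from `INITIAL_COMMITMENT_NUMBER` (2^48 - 1), while the peer's `next_remote_commitment_number` counts up, so the check first converts our countdown counter into count-up form. Below is a minimal, self-contained sketch of the new check; `check_stale_reestablish` and its parameter names are hypothetical, for illustration only, not LDK's actual API.

    // Standalone sketch of the stale-reestablish check added above (simplified,
    // hypothetical function; only the arithmetic mirrors the real code).
    const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

    fn check_stale_reestablish(
        cur_holder_commitment_transaction_number: u64, // our countdown counter
        next_remote_commitment_number: u64,            // peer's count-up counter
    ) -> Result<(), String> {
        // Convert our countdown counter into count-up form for comparison.
        let our_commitment_transaction =
            INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number - 1;
        if next_remote_commitment_number + 1 < our_commitment_transaction {
            // The real code wraps this in ChannelError::Warn, which sends a
            // warning instead of force-closing, so the stale peer can recover.
            return Err(format!(
                "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
                next_remote_commitment_number, our_commitment_transaction));
        }
        Ok(())
    }

    fn main() {
        // We are at count-up state 5, but the peer reports that it next
        // expects a much older commitment: it has fallen behind, so the
        // check trips.
        let ours = INITIAL_COMMITMENT_NUMBER - 6; // count-up value 5
        assert!(check_stale_reestablish(ours, 1).is_err());
        // An up-to-date peer (next expects commitment 5) passes.
        assert!(check_stale_reestablish(ours, 5).is_ok());
    }
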
62 changes: 47 additions & 15 deletions lightning/src/ln/functional_tests.rs
@@ -7304,8 +7304,7 @@ fn test_user_configurable_csv_delay() {
} else { assert!(false); }
}

#[test]
fn test_data_loss_protect() {
fn do_test_data_loss_protect(try_to_reconnect: bool) {
// We want to be sure that:
// * we don't broadcast our Local Commitment Tx in case of fallen behind
// (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
@@ -7402,22 +7401,55 @@ fn test_data_loss_protect() {
}

// Check we close channel detecting A is fallen-behind
// Check that we sent the warning message when we detected that A has fallen behind,
// and give A the possibility to recover from the warning.
nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
check_added_monitors!(nodes[1], 1);
let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));

// Check A is able to claim to_remote output
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
assert_eq!(node_txn[0].output.len(), 2);
mine_transaction(&nodes[0], &node_txn[0]);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0]);
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
// Node B should not broadcast the transaction to force-close the channel!
assert!(node_txn.is_empty());
// B should now detect that there is something wrong and should force close the channel.
let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });

// After the warning message sent by B, we should not be able to
// use the channel, or successfully reconnect to it.
assert!(nodes[0].node.list_usable_channels().is_empty());
if try_to_reconnect {
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);

nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
let mut err_msgs_0 = Vec::with_capacity(1);
for msg in nodes[0].node.get_and_clear_pending_msg_events() {
if let MessageSendEvent::HandleError { ref action, .. } = msg {
match action {
&ErrorAction::SendErrorMessage { ref msg } => {
assert_eq!(msg.data, "Failed to find corresponding channel");
err_msgs_0.push(msg.clone());
},
_ => panic!("Unexpected event!"),
}
} else {
panic!("Unexpected event!");
}
}
assert_eq!(err_msgs_0.len(), 1);
nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
assert!(nodes[1].node.list_usable_channels().is_empty());
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
check_closed_broadcast!(nodes[1], false);
}
}

#[test]
fn test_data_loss_protect() {
do_test_data_loss_protect(true);
}

#[test]
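
The `check_warn_msg!` helper used above pulls the queued warning out of node B's pending message events. The following is a rough, self-contained illustration of that pattern, with simplified stand-in types rather than LDK's real `MessageSendEvent` and `ErrorAction` definitions.

    // Hypothetical, simplified stand-ins for LDK's message-event types, just
    // to show the filtering a helper like check_warn_msg! relies on.
    #[allow(dead_code)]
    #[derive(Debug, Clone)]
    enum ErrorAction {
        SendWarningMessage { data: String },
        SendErrorMessage { data: String },
    }

    #[allow(dead_code)]
    #[derive(Debug, Clone)]
    enum MessageSendEvent {
        HandleError { action: ErrorAction },
        Other,
    }

    // Drain the node's pending events and return the single expected warning.
    fn extract_warn_msg(events: Vec<MessageSendEvent>) -> String {
        let mut warnings: Vec<String> = events
            .into_iter()
            .filter_map(|e| match e {
                MessageSendEvent::HandleError {
                    action: ErrorAction::SendWarningMessage { data },
                } => Some(data),
                _ => None,
            })
            .collect();
        assert_eq!(warnings.len(), 1, "expected exactly one queued warning");
        warnings.pop().unwrap()
    }

    fn main() {
        let events = vec![MessageSendEvent::HandleError {
            action: ErrorAction::SendWarningMessage {
                data: "Peer attempted to reestablish channel with a very old \
                       local commitment transaction".to_string(),
            },
        }];
        assert!(extract_warn_msg(events).contains("very old local commitment transaction"));
    }

Note the flow the test exercises: because B only warned, the channel stays open on B's side; A, which already closed locally after detecting it had fallen behind, answers B's reestablish on reconnect with "Failed to find corresponding channel", and only then does B force-close.
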
