From 4791fc6082bd8a2928d642454e98b629f4ab3e20 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Fri, 6 Dec 2024 20:39:07 +0100
Subject: [PATCH 01/13] protofsm: eliminate outer option layer in EmittedEvent

We'll have the empty slice tuple represent the None case instead.
---
 protofsm/state_machine.go      | 59 +++++++++++++---------------------
 protofsm/state_machine_test.go |  8 ++---
 2 files changed, 25 insertions(+), 42 deletions(-)

diff --git a/protofsm/state_machine.go b/protofsm/state_machine.go
index a81f5746b2..2cc1219022 100644
--- a/protofsm/state_machine.go
+++ b/protofsm/state_machine.go
@@ -34,11 +34,11 @@ type EmittedEvent[Event any] struct {
 	// InternalEvent is an optional internal event that is to be routed
 	// back to the target state. This enables state to trigger one or many
 	// state transitions without a new external event.
-	InternalEvent fn.Option[[]Event]
+	InternalEvent []Event
 
 	// ExternalEvent is an optional external event that is to be sent to
 	// the daemon for dispatch. Usually, this is some form of I/O.
-	ExternalEvents fn.Option[DaemonEventSet]
+	ExternalEvents DaemonEventSet
 }
 
 // StateTransition is a state transition type. It denotes the next state to go
@@ -573,46 +573,31 @@ func (s *StateMachine[Event, Env]) applyEvents(currentState State[Event, Env],
 			// With the event processed, we'll process any
 			// new daemon events that were emitted as part
 			// of this new state transition.
-			//
-			//nolint:ll
-			err := fn.MapOptionZ(events.ExternalEvents, func(dEvents DaemonEventSet) error {
-				log.Debugf("FSM(%v): processing "+
-					"daemon %v daemon events",
-					s.cfg.Env.Name(), len(dEvents))
-
-				for _, dEvent := range dEvents {
-					err := s.executeDaemonEvent(
-						dEvent,
-					)
-					if err != nil {
-						return err
-					}
+			for _, dEvent := range events.ExternalEvents {
+				err := s.executeDaemonEvent(
+					dEvent,
+				)
+				if err != nil {
+					return err
 				}
-
-				return nil
-			})
-			if err != nil {
-				return err
 			}
 
-			// Next, we'll add any new emitted events to
-			// our event queue.
+			// Next, we'll add any new emitted events to our
+			// event queue.
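+			// Note that with the outer option layer removed, an
+			// empty (or nil) InternalEvent slice now encodes the
+			// None case: the enqueue loop below simply runs zero
+			// times.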
// //nolint:ll - events.InternalEvent.WhenSome(func(es []Event) { - for _, inEvent := range es { - log.Debugf("FSM(%v): adding "+ - "new internal event "+ - "to queue: %v", - s.cfg.Env.Name(), - lnutils.SpewLogClosure( - inEvent, - ), - ) - - eventQueue.Enqueue(inEvent) - } - }) + for _, inEvent := range events.InternalEvent { + log.Debugf("FSM(%v): adding "+ + "new internal event "+ + "to queue: %v", + s.cfg.Env.Name(), + lnutils.SpewLogClosure( + inEvent, + ), + ) + + eventQueue.Enqueue(inEvent) + } return nil }) diff --git a/protofsm/state_machine_test.go b/protofsm/state_machine_test.go index fc7a4ccfdc..ea596dc250 100644 --- a/protofsm/state_machine_test.go +++ b/protofsm/state_machine_test.go @@ -80,9 +80,7 @@ func (d *dummyStateStart) ProcessEvent(event dummyEvents, env *dummyEnv, return &StateTransition[dummyEvents, *dummyEnv]{ NextState: &dummyStateStart{}, NewEvents: fn.Some(EmittedEvent[dummyEvents]{ - InternalEvent: fn.Some( - []dummyEvents{&goToFin{}}, - ), + InternalEvent: []dummyEvents{&goToFin{}}, }), }, nil @@ -114,13 +112,13 @@ func (d *dummyStateStart) ProcessEvent(event dummyEvents, env *dummyEnv, canSend: d.canSend, }, NewEvents: fn.Some(EmittedEvent[dummyEvents]{ - ExternalEvents: fn.Some(DaemonEventSet{ + ExternalEvents: DaemonEventSet{ sendEvent, sendEvent2, &BroadcastTxn{ Tx: &wire.MsgTx{}, Label: "test", }, - }), + }, }), }, nil } From 62a3db1cc2d3f8a1731e83d8105cf84437dd963b Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 31 Jan 2024 19:17:38 -0800 Subject: [PATCH 02/13] lnwallet/chancloser: add states for new protofsm rbf closer In this commit, we add the initial set of states for the new protofsm based rbf chan closer. A diagram outlining the new states and their transitions can be found here: https://gist.github.com/Roasbeef/acc4ff51b9dff127230228a05553cdfe. Unlike the existing co-op close process, this co-op close can be restarted at anytime if either side sends a shutdown message. From there, we'll each obtain a new RBF'd version that can be re-broadcasted. This commit creates the set of states, along with the environment that our state machine will use to drive itself forward. --- lnwallet/chancloser/rbf_coop_states.go | 756 +++++++++++++++++++++++++ 1 file changed, 756 insertions(+) create mode 100644 lnwallet/chancloser/rbf_coop_states.go diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go new file mode 100644 index 0000000000..764cdecb20 --- /dev/null +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -0,0 +1,756 @@ +package chancloser + +import ( + "fmt" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/protofsm" +) + +var ( + // ErrInvalidStateTransition is returned when we receive an unexpected + // event for a given state. + ErrInvalidStateTransition = fmt.Errorf("invalid state transition") + + // ErrTooManySigs is returned when we receive too many sigs from the + // remote party in the ClosingSigs message. 
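+	// We expect exactly one of the possible signature fields to be set.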
+	ErrTooManySigs = fmt.Errorf("too many sigs received")
+
+	// ErrNoSig is returned when we receive no sig from the remote party.
+	ErrNoSig = fmt.Errorf("no sig received")
+
+	// ErrUnknownFinalBalance is returned if we're unable to determine the
+	// final channel balance after a flush.
+	ErrUnknownFinalBalance = fmt.Errorf("unknown final balance")
+
+	// ErrRemoteCannotPay is returned if the remote party cannot pay
+	// for the fees when it sends a signature.
+	ErrRemoteCannotPay = fmt.Errorf("remote cannot pay fees")
+
+	// ErrNonFinalSequence is returned if we receive a non-final sequence
+	// from the remote party for their signature.
+	ErrNonFinalSequence = fmt.Errorf("received non-final sequence")
+
+	// ErrCloserNoClosee is returned if our balance is dust, but the remote
+	// party includes our output.
+	ErrCloserNoClosee = fmt.Errorf("expected CloserNoClosee sig")
+
+	// ErrCloserAndClosee is returned when we expect a sig covering both
+	// outputs, but it isn't present.
+	ErrCloserAndClosee = fmt.Errorf("expected CloserAndClosee sig")
+)
+
+// ProtocolEvent is a special interface used to create the equivalent of a
+// sum-type, but using a "sealed" interface. Protocol events can be used as
+// input to trigger a state transition, and also as output to trigger a new set
+// of events into the very same state machine.
+type ProtocolEvent interface {
+	protocolSealed()
+}
+
+// ProtocolEvents is a special type constraint that enumerates all the possible
+// protocol events. This is used mainly as type-level documentation, and may
+// also be useful to constrain certain state transition functions.
+type ProtocolEvents interface {
+	SendShutdown | ShutdownReceived | ShutdownComplete | ChannelFlushed |
+		SendOfferEvent | OfferReceivedEvent | LocalSigReceived |
+		SpendEvent
+}
+
+// SpendEvent indicates that a transaction spending the funding outpoint has
+// been confirmed in the main chain.
+type SpendEvent struct {
+	// Tx is the spending transaction that has been confirmed.
+	Tx *wire.MsgTx
+
+	// BlockHeight is the height of the block that confirmed the
+	// transaction.
+	BlockHeight uint32
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *SpendEvent) protocolSealed() {}
+
+// SendShutdown indicates that the user wishes to co-op close the channel, so we
+// should send a new shutdown message to the remote party.
+//
+// transition:
+//   - fromState: ChannelActive
+//   - toState: ChannelFlushing
+type SendShutdown struct {
+	// DeliveryAddr is the address we'd like to receive the funds to. If
+	// None, then a new addr will be generated.
+	DeliveryAddr fn.Option[lnwire.DeliveryAddress]
+
+	// IdealFeeRate is the ideal fee rate we'd like to use for the closing
+	// attempt.
+	IdealFeeRate chainfee.SatPerVByte
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *SendShutdown) protocolSealed() {}
+
+// ShutdownReceived indicates that we received a shutdown event so we need to
+// enter the flushing state.
+//
+// transition:
+//   - fromState: ChannelActive
+//   - toState: ChannelFlushing
+type ShutdownReceived struct {
+	// ShutdownScript is the script the remote party wants to use to
+	// shutdown.
+	ShutdownScript lnwire.DeliveryAddress
+
+	// BlockHeight is the height at which the shutdown message was
+	// received. This is used for channel leases to determine if a co-op
+	// close can occur.
+	BlockHeight uint32
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *ShutdownReceived) protocolSealed() {}
+
+// ShutdownComplete is an event that indicates the channel has been fully shut
+// down. At this point, we'll go to the ChannelFlushing state so we can
+// wait for all pending updates to be gone from the channel.
+//
+// transition:
+//   - fromState: ShutdownPending
+//   - toState: ChannelFlushing
+type ShutdownComplete struct {
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *ShutdownComplete) protocolSealed() {}
+
+// ShutdownBalances holds the local+remote balance once the channel has been
+// fully flushed.
+type ShutdownBalances struct {
+	// LocalBalance is the local balance of the channel.
+	LocalBalance lnwire.MilliSatoshi
+
+	// RemoteBalance is the remote balance of the channel.
+	RemoteBalance lnwire.MilliSatoshi
+}
+
+// unknownBalance is a special variable used to denote an unknown channel
+// balance (channel not fully flushed yet).
+var unknownBalance = ShutdownBalances{}
+
+// ChannelFlushed is an event that indicates the channel has been fully flushed
+// and we can now start closing negotiation.
+//
+// transition:
+//   - fromState: ChannelFlushing
+//   - toState: ClosingNegotiation
+type ChannelFlushed struct {
+	// FreshFlush indicates if this is the first time the channel has been
+	// flushed, or if this is a flush as part of an RBF iteration.
+	FreshFlush bool
+
+	// ShutdownBalances is the balances of the channel once it has been
+	// flushed. We tie this to the ChannelFlushed state as this may not be
+	// the same as the starting value.
+	ShutdownBalances
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (c *ChannelFlushed) protocolSealed() {}
+
+// SendOfferEvent is a self-triggered event that transitions us from the
+// LocalCloseStart state to the LocalOfferSent state. This kicks off the new
+// signing process for the co-op close.
+//
+// transition:
+//   - fromState: LocalCloseStart
+//   - toState: LocalOfferSent
+type SendOfferEvent struct {
+	// TargetFeeRate is the fee rate we'll use for the closing transaction.
+	TargetFeeRate chainfee.SatPerVByte
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *SendOfferEvent) protocolSealed() {}
+
+// LocalSigReceived is an event that indicates we've received a signature from
+// the remote party, which signs the co-op close transaction at our
+// specified fee rate.
+//
+// transition:
+//   - fromState: LocalOfferSent
+//   - toState: ClosePending
+type LocalSigReceived struct {
+	// SigMsg is the sig message we received from the remote party.
+	SigMsg lnwire.ClosingSig
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *LocalSigReceived) protocolSealed() {}
+
+// OfferReceivedEvent is an event that indicates we've received an offer from
+// the remote party. This applies to the RemoteCloseStart state.
+//
+// transition:
+//   - fromState: RemoteCloseStart
+//   - toState: ClosePending
+type OfferReceivedEvent struct {
+	// SigMsg is the signature message we received from the remote party.
+	SigMsg lnwire.ClosingComplete
+}
+
+// protocolSealed indicates that this struct is a ProtocolEvent instance.
+func (s *OfferReceivedEvent) protocolSealed() {}
+
+// CloseSigner is an interface that abstracts away the details of signing
+// new coop close transactions.
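+// In practice this is implemented by lnwallet.LightningChannel (whose
+// CreateCloseProposal and CompleteCooperativeClose methods match this
+// interface), though tests may supply a lightweight mock instead.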
+type CloseSigner interface {
+	// CreateCloseProposal creates a new co-op close proposal in the form
+	// of a valid signature, the chainhash of the final txid, and our final
+	// balance in the created state.
+	CreateCloseProposal(proposedFee btcutil.Amount,
+		localDeliveryScript []byte, remoteDeliveryScript []byte,
+		closeOpt ...lnwallet.ChanCloseOpt,
+	) (
+		input.Signature, *chainhash.Hash, btcutil.Amount, error)
+
+	// CompleteCooperativeClose persistently "completes" the cooperative
+	// close by producing a fully signed co-op close transaction.
+	CompleteCooperativeClose(localSig, remoteSig input.Signature,
+		localDeliveryScript, remoteDeliveryScript []byte,
+		proposedFee btcutil.Amount, closeOpt ...lnwallet.ChanCloseOpt,
+	) (*wire.MsgTx, btcutil.Amount, error)
+}
+
+// ChanStateObserver is an interface used to observe state changes that occur
+// in a channel. This can be used to figure out if we're able to send a
+// shutdown message or not.
+type ChanStateObserver interface {
+	// NoDanglingUpdates returns true if there are no dangling updates in
+	// the channel. In other words, there are no active update messages
+	// that haven't already been covered by a commit sig.
+	NoDanglingUpdates() bool
+
+	// DisableIncomingAdds instructs the channel link to disable
+	// processing new incoming add messages.
+	DisableIncomingAdds() error
+
+	// DisableOutgoingAdds instructs the channel link to disable
+	// processing new outgoing add messages.
+	DisableOutgoingAdds() error
+
+	// MarkCoopBroadcasted persistently marks that the channel close
+	// transaction has been broadcast.
+	MarkCoopBroadcasted(*wire.MsgTx, bool) error
+
+	// MarkShutdownSent persists the given ShutdownInfo. The existence of
+	// the ShutdownInfo represents the fact that the Shutdown message has
+	// been sent by us and so should be re-sent on re-establish.
+	MarkShutdownSent(deliveryAddr []byte, isInitiator bool) error
+
+	// FinalBalances is the balances of the channel once it has been
+	// flushed. If Some, then this indicates that the channel is now in a
+	// state where it's always flushed, so we can accelerate the state
+	// transitions.
+	FinalBalances() fn.Option[ShutdownBalances]
+}
+
+// Environment is a set of dependencies that a state machine may need to carry
+// out the logic for a given state transition. All fields are to be considered
+// immutable, and will be fixed for the lifetime of the state machine.
+type Environment struct {
+	// ChainParams is the chain parameters for the channel.
+	ChainParams chaincfg.Params
+
+	// ChanPeer is the peer we're attempting to close the channel with.
+	ChanPeer btcec.PublicKey
+
+	// ChanPoint is the channel point of the active channel.
+	ChanPoint wire.OutPoint
+
+	// ChanID is the channel ID of the channel we're attempting to close.
+	ChanID lnwire.ChannelID
+
+	// Scid is the short channel ID of the channel we're attempting
+	// to close.
+	Scid lnwire.ShortChannelID
+
+	// ChanType is the type of channel we're attempting to close.
+	ChanType channeldb.ChannelType
+
+	// DefaultFeeRate is the fee we'll use for the closing transaction if
+	// the user didn't specify an ideal fee rate. This may happen if the
+	// remote party is the one that initiates the co-op close.
+	DefaultFeeRate chainfee.SatPerVByte
+
+	// ThawHeight is the height at which the channel will be thawed. If
+	// this is None, then co-op close can occur at any moment.
+	ThawHeight fn.Option[uint32]
+
+	// RemoteUpfrontShutdown is the upfront shutdown addr of the remote
+	// party. 
We'll use this to validate if the remote peer is authorized to
+	// close the channel with the sent addr or not.
+	RemoteUpfrontShutdown fn.Option[lnwire.DeliveryAddress]
+
+	// LocalUpfrontShutdown is our upfront shutdown address. If Some, then
+	// we'll default to using this.
+	LocalUpfrontShutdown fn.Option[lnwire.DeliveryAddress]
+
+	// NewDeliveryScript is a function that returns a new delivery script.
+	// This is used if we don't have an upfront shutdown addr, and no addr
+	// was specified at closing time.
+	NewDeliveryScript func() (lnwire.DeliveryAddress, error)
+
+	// FeeEstimator is the fee estimator we'll use to determine the fee in
+	// satoshis we'll pay given a local and/or remote output.
+	FeeEstimator CoopFeeEstimator
+
+	// ChanObserver is an interface used to observe state changes to the
+	// channel. We'll use this to figure out when/if we can send certain
+	// messages.
+	ChanObserver ChanStateObserver
+
+	// CloseSigner is the signer we'll use to sign the close transaction.
+	// This is a part of the ChannelFlushed state, as the channel state
+	// we'll be signing can only be determined once the channel has been
+	// flushed.
+	CloseSigner CloseSigner
+}
+
+// Name returns the name of the environment. This is used to uniquely identify
+// the environment of related state machines. For this state machine, the name
+// is based on the channel ID.
+func (e *Environment) Name() string {
+	return fmt.Sprintf("rbf_chan_closer(%v)", e.ChanPoint)
+}
+
+// CloseStateTransition is the StateTransition type specific to the coop close
+// state machine.
+//
+//nolint:ll
+type CloseStateTransition = protofsm.StateTransition[ProtocolEvent, *Environment]
+
+// ProtocolState is our sum-type ish interface that represents the current
+// protocol state.
+type ProtocolState interface {
+	// protocolStateSealed is a special method that is used to seal the
+	// interface (only types in this package can implement it).
+	protocolStateSealed()
+
+	// IsTerminal returns true if the target state is a terminal state.
+	IsTerminal() bool
+
+	// ProcessEvent takes a protocol event, and implements a state
+	// transition for the state.
+	ProcessEvent(ProtocolEvent, *Environment) (*CloseStateTransition, error)
+}
+
+// AsymmetricPeerState is an extension of the normal ProtocolState interface
+// that gives a caller a hint on whether the target state should process an
+// incoming event or not.
+type AsymmetricPeerState interface {
+	ProtocolState
+
+	// ShouldRouteTo returns true if the target state should process the
+	// target event.
+	ShouldRouteTo(ProtocolEvent) bool
+}
+
+// ProtocolStates is a special type constraint that enumerates all the possible
+// protocol states.
+type ProtocolStates interface {
+	ChannelActive | ShutdownPending | ChannelFlushing | ClosingNegotiation |
+		LocalCloseStart | LocalOfferSent | RemoteCloseStart |
+		ClosePending | CloseFin
+}
+
+// ChannelActive is the base state for the channel closer state machine. In
+// this state, we haven't begun the shutdown process yet, so the channel is
+// still active. Receiving the SendShutdown or ShutdownReceived events will
+// transition us to the ChannelFlushing state.
+//
+// When we transition to this state, we emit a DaemonEvent to send the shutdown
+// message if we received one ourselves. Alternatively, we may send out a new
+// shutdown if we're initiating it for the very first time. 
+//
+// transition:
+//   - fromState: None
+//   - toState: ChannelFlushing
+//
+// input events:
+//   - SendShutdown
+//   - ShutdownReceived
+type ChannelActive struct {
+}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (c *ChannelActive) IsTerminal() bool {
+	return false
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (c *ChannelActive) protocolStateSealed() {}
+
+// ShutdownScripts is a set of scripts that we'll use to co-op close the
+// channel.
+type ShutdownScripts struct {
+	// LocalDeliveryScript is the script that we'll send our settled
+	// channel funds to.
+	LocalDeliveryScript lnwire.DeliveryAddress
+
+	// RemoteDeliveryScript is the script that we'll send the remote
+	// party's settled channel funds to.
+	RemoteDeliveryScript lnwire.DeliveryAddress
+}
+
+// ShutdownPending is the state we enter into after we've sent or received the
+// shutdown message. If we sent the shutdown, then we'll wait for the remote
+// party to send a shutdown. Otherwise, if we received it, then we'll send our
+// shutdown then go to the next state.
+//
+// transition:
+//   - fromState: ChannelActive
+//   - toState: ChannelFlushing
+//
+// input events:
+//   - SendShutdown
+//   - ShutdownReceived
+type ShutdownPending struct {
+	// ShutdownScripts store the set of scripts we'll use to initiate a coop
+	// close.
+	ShutdownScripts
+
+	// IdealFeeRate is the ideal fee rate we'd like to use for the closing
+	// attempt.
+	IdealFeeRate fn.Option[chainfee.SatPerVByte]
+}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (s *ShutdownPending) IsTerminal() bool {
+	return false
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (s *ShutdownPending) protocolStateSealed() {}
+
+// ChannelFlushing is the state we enter into after we've received or sent a
+// shutdown message. In this state, we wait for the ChannelFlushed event, after
+// which we'll transition to the ClosingNegotiation state.
+//
+// transition:
+//   - fromState: ShutdownPending
+//   - toState: ClosingNegotiation
+//
+// input events:
+//   - ShutdownComplete
+//   - ShutdownReceived
+type ChannelFlushing struct {
+	// EarlyRemoteOffer is the offer we received from the remote party
+	// before we obtained the local channel flushed event. We'll stash this
+	// to process later.
+	EarlyRemoteOffer fn.Option[OfferReceivedEvent]
+
+	// ShutdownScripts store the set of scripts we'll use to initiate a coop
+	// close.
+	ShutdownScripts
+
+	// IdealFeeRate is the ideal fee rate we'd like to use for the closing
+	// transaction. Once the channel has been flushed, we'll use this as
+	// our target fee rate.
+	IdealFeeRate fn.Option[chainfee.SatPerVByte]
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (c *ChannelFlushing) protocolStateSealed() {}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (c *ChannelFlushing) IsTerminal() bool {
+	return false
+}
+
+// ClosingNegotiation is the state we transition to once the channel has been
+// flushed. This is actually a composite state that contains one for each side
+// of the channel, as the closing process is asymmetric. Once either of the
+// peer states reaches the CloseFin state, then the channel is fully closed,
+// and we'll transition to that terminal state. 
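+// For example, after a flush the local half may advance from LocalCloseStart
+// to LocalOfferSent while the remote half remains in RemoteCloseStart; the
+// ShouldRouteTo method of each half decides which one handles a given event.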
+//
+// transition:
+//   - fromState: ChannelFlushing
+//   - toState: CloseFin
+//
+// input events:
+//   - ChannelFlushed
+type ClosingNegotiation struct {
+	// PeerState is a composite state that contains the state for both the
+	// local and remote parties. Our usage of Dual makes this a special
+	// state that allows us to treat two states as a single state. We'll use
+	// the ShouldRouteTo method to determine which state to route incoming
+	// events to.
+	PeerState lntypes.Dual[AsymmetricPeerState]
+}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (c *ClosingNegotiation) IsTerminal() bool {
+	return false
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (c *ClosingNegotiation) protocolStateSealed() {}
+
+// CloseChannelTerms is a set of terms that we'll use to close the channel. This
+// includes the balances of the channel, and the scripts we'll use to send each
+// party's funds to.
+type CloseChannelTerms struct {
+	ShutdownScripts
+
+	ShutdownBalances
+}
+
+// DeriveCloseTxOuts takes the close terms, and returns the local and remote tx
+// out for the close transaction. If an output is dust, then it'll be nil.
+//
+// TODO(roasbeef): add func for w/e heuristic to not manifest own output?
+func (c *CloseChannelTerms) DeriveCloseTxOuts() (*wire.TxOut, *wire.TxOut) {
+	//nolint:ll
+	deriveTxOut := func(balance btcutil.Amount, pkScript []byte) *wire.TxOut {
+		dustLimit := lnwallet.DustLimitForSize(len(pkScript))
+		if balance >= dustLimit {
+			return &wire.TxOut{
+				PkScript: pkScript,
+				Value:    int64(balance),
+			}
+		}
+
+		return nil
+	}
+
+	localTxOut := deriveTxOut(
+		c.LocalBalance.ToSatoshis(),
+		c.LocalDeliveryScript,
+	)
+	remoteTxOut := deriveTxOut(
+		c.RemoteBalance.ToSatoshis(),
+		c.RemoteDeliveryScript,
+	)
+
+	return localTxOut, remoteTxOut
+}
+
+// RemoteAmtIsDust returns true if the remote output is dust.
+func (c *CloseChannelTerms) RemoteAmtIsDust() bool {
+	return c.RemoteBalance.ToSatoshis() < lnwallet.DustLimitForSize(
+		len(c.RemoteDeliveryScript),
+	)
+}
+
+// LocalAmtIsDust returns true if the local output is dust.
+func (c *CloseChannelTerms) LocalAmtIsDust() bool {
+	return c.LocalBalance.ToSatoshis() < lnwallet.DustLimitForSize(
+		len(c.LocalDeliveryScript),
+	)
+}
+
+// LocalCanPayFees returns true if the local party can pay the absolute fee
+// from their local settled balance.
+func (c *CloseChannelTerms) LocalCanPayFees(absoluteFee btcutil.Amount) bool {
+	return c.LocalBalance.ToSatoshis() >= absoluteFee
+}
+
+// RemoteCanPayFees returns true if the remote party can pay the absolute fee
+// from their remote settled balance.
+func (c *CloseChannelTerms) RemoteCanPayFees(absoluteFee btcutil.Amount) bool {
+	return c.RemoteBalance.ToSatoshis() >= absoluteFee
+}
+
+// LocalCloseStart is the state we enter into after we've received or sent
+// shutdown, and the channel has been flushed. In this state, we'll emit a new
+// event to send our offer to drive the rest of the process.
+//
+// transition:
+//   - fromState: ChannelFlushing
+//   - toState: LocalOfferSent
+//
+// input events:
+//   - SendOfferEvent
+type LocalCloseStart struct {
+	CloseChannelTerms
+}
+
+// ShouldRouteTo returns true if the target state should process the target
+// event.
+func (l *LocalCloseStart) ShouldRouteTo(event ProtocolEvent) bool {
+	switch event.(type) {
+	case *SendOfferEvent:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsTerminal returns true if the target state is a terminal state. 
+func (l *LocalCloseStart) IsTerminal() bool {
+	return false
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (l *LocalCloseStart) protocolStateSealed() {}
+
+// LocalOfferSent is the state we transition to after we receive the
+// SendOfferEvent in the LocalCloseStart state. In this state we send our
+// offer to the remote party, then await a sig from them which concludes the
+// local cooperative close process.
+//
+// transition:
+//   - fromState: LocalCloseStart
+//   - toState: ClosePending
+//
+// input events:
+//   - LocalSigReceived
+type LocalOfferSent struct {
+	CloseChannelTerms
+
+	// ProposedFee is the fee we proposed to the remote party.
+	ProposedFee btcutil.Amount
+
+	// LocalSig is the signature we sent to the remote party.
+	LocalSig lnwire.Sig
+}
+
+// ShouldRouteTo returns true if the target state should process the target
+// event.
+func (l *LocalOfferSent) ShouldRouteTo(event ProtocolEvent) bool {
+	switch event.(type) {
+	case *LocalSigReceived:
+		return true
+	default:
+		return false
+	}
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (l *LocalOfferSent) protocolStateSealed() {}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (l *LocalOfferSent) IsTerminal() bool {
+	return false
+}
+
+// ClosePending is the state we enter after concluding the negotiation for the
+// remote or local state. At this point, given a confirmation notification we
+// can terminate the process. Otherwise, we can receive a fresh CoopCloseReq to
+// go back to the very start.
+//
+// transition:
+//   - fromState: LocalOfferSent || RemoteCloseStart
+//   - toState: CloseFin
+//
+// input events:
+//   - LocalSigReceived
+//   - OfferReceivedEvent
+type ClosePending struct {
+	// CloseTx is the pending close transaction.
+	CloseTx *wire.MsgTx
+}
+
+// ShouldRouteTo returns true if the target state should process the target
+// event.
+func (c *ClosePending) ShouldRouteTo(event ProtocolEvent) bool {
+	switch event.(type) {
+	case *SpendEvent:
+		return true
+	default:
+		return false
+	}
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (c *ClosePending) protocolStateSealed() {}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (c *ClosePending) IsTerminal() bool {
+	return true
+}
+
+// CloseFin is the terminal state for the channel closer state machine. At this
+// point, the close tx has been confirmed on chain.
+type CloseFin struct {
+	// ConfirmedTx is the transaction that confirmed the channel close.
+	ConfirmedTx *wire.MsgTx
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance.
+func (c *CloseFin) protocolStateSealed() {}
+
+// IsTerminal returns true if the target state is a terminal state.
+func (c *CloseFin) IsTerminal() bool {
+	return true
+}
+
+// RemoteCloseStart is similar to the LocalCloseStart, but is used to drive the
+// process of signing an offer for the remote party.
+//
+// transition:
+//   - fromState: ChannelFlushing
+//   - toState: ClosePending
+type RemoteCloseStart struct {
+	CloseChannelTerms
+}
+
+// ShouldRouteTo returns true if the target state should process the target
+// event.
+func (l *RemoteCloseStart) ShouldRouteTo(event ProtocolEvent) bool {
+	switch event.(type) {
+	case *OfferReceivedEvent:
+		return true
+	default:
+		return false
+	}
+}
+
+// protocolStateSealed indicates that this struct is a ProtocolState instance. 
+func (l *RemoteCloseStart) protocolStateSealed() {} + +// IsTerminal returns true if the target state is a terminal state. +func (l *RemoteCloseStart) IsTerminal() bool { + return false +} + +// RbfChanCloser is a state machine that handles the RBF-enabled cooperative +// channel close protocol. +type RbfChanCloser = protofsm.StateMachine[ProtocolEvent, *Environment] + +// RbfChanCloserCfg is a configuration struct that is used to initialize a new +// RBF chan closer state machine. +type RbfChanCloserCfg = protofsm.StateMachineCfg[ProtocolEvent, *Environment] + +// RbfSpendMapper is a type used to map the generic spend event to one specific +// to this package. +type RbfSpendMapper = protofsm.SpendMapper[ProtocolEvent] + +func SpendMapper(spendEvent *chainntnfs.SpendDetail) ProtocolEvent { + return &SpendEvent{ + Tx: spendEvent.SpendingTx, + BlockHeight: uint32(spendEvent.SpendingHeight), + } +} + +// RbfMsgMapperT is a type used to map incoming wire messages to protocol +// events. +type RbfMsgMapperT = protofsm.MsgMapper[ProtocolEvent] + +// RbfState is a type alias for the state of the RBF channel closer. +type RbfState = protofsm.State[ProtocolEvent, *Environment] + +// RbfEvent is a type alias for the event type of the RBF channel closer. +type RbfEvent = protofsm.EmittedEvent[ProtocolEvent] From f6525c9e7d7dd3dadaddfd0a450fe244e480da88 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 31 Jan 2024 19:23:03 -0800 Subject: [PATCH 03/13] lnwallet: add ability to specify custom sequence for co-op close tx In this commit, we add the ability to specify a custom sequence for a co-op close tx. This'll come in handy later as the new co-op close process allows a party to set a custom sequence. --- lnwallet/channel.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/lnwallet/channel.go b/lnwallet/channel.go index d190acdf5e..8e1aed81b0 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -8199,6 +8199,8 @@ type chanCloseOpt struct { // transaction outputs. If this isn't set, then the default BIP-69 // sorting is used. customSort CloseSortFunc + + customSequence fn.Option[uint32] } // ChanCloseOpt is a closure type that cen be used to modify the set of default @@ -8232,6 +8234,14 @@ func WithExtraCloseOutputs(extraOutputs []CloseOutput) ChanCloseOpt { func WithCustomCoopSort(sorter CloseSortFunc) ChanCloseOpt { return func(opts *chanCloseOpt) { opts.customSort = sorter + } +} + +// WithCustomSequence can be used to specify a custom sequence number for the +// co-op close process. Otherwise, a default non-final sequence will be used. +func WithCustomSequence(sequence uint32) ChanCloseOpt { + return func(opts *chanCloseOpt) { + opts.customSequence = fn.Some(sequence) } } @@ -8294,6 +8304,12 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount, ) } + opts.customSequence.WhenSome(func(sequence uint32) { + closeTxOpts = append(closeTxOpts, WithCustomTxInSequence( + sequence, + )) + }) + closeTx, err := CreateCooperativeCloseTx( fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit, lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance, @@ -8395,6 +8411,12 @@ func (lc *LightningChannel) CompleteCooperativeClose( ) } + opts.customSequence.WhenSome(func(sequence uint32) { + closeTxOpts = append(closeTxOpts, WithCustomTxInSequence( + sequence, + )) + }) + // Create the transaction used to return the current settled balance // on this active channel back to both parties. 
In this current model, // the initiator pays full fees for the cooperative close transaction. @@ -9111,6 +9133,11 @@ type closeTxOpts struct { // transaction outputs. If this isn't set, then the default BIP-69 // sorting is used. customSort CloseSortFunc + + // customSequence is an optional custom sequence to set on the co-op + // close transaction. This gives slightly more control compared to the + // enableRBF option. + customSequence fn.Option[uint32] } // defaultCloseTxOpts returns a closeTxOpts struct with default values. @@ -9144,6 +9171,14 @@ func WithExtraTxCloseOutputs(extraOutputs []CloseOutput) CloseTxOpt { func WithCustomTxSort(sorter CloseSortFunc) CloseTxOpt { return func(opts *closeTxOpts) { opts.customSort = sorter + } +} + +// WithCustomTxInSequence allows a caller to set a custom sequence on the sole +// input of the co-op close tx. +func WithCustomTxInSequence(sequence uint32) CloseTxOpt { + return func(o *closeTxOpts) { + o.customSequence = fn.Some(sequence) } } @@ -9169,6 +9204,11 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, fundingTxIn.Sequence = mempool.MaxRBFSequence } + // Otherwise, a custom sequence might be specified. + opts.customSequence.WhenSome(func(sequence uint32) { + fundingTxIn.Sequence = sequence + }) + // Construct the transaction to perform a cooperative closure of the // channel. In the event that one side doesn't have any settled funds // within the channel then a refund output for that particular side can @@ -9176,6 +9216,8 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, closeTx := wire.NewMsgTx(2) closeTx.AddTxIn(&fundingTxIn) + // TODO(roasbeef): needs support for dropping inputs + // Create both cooperative closure outputs, properly respecting the // dust limits of both parties. var localOutputIdx fn.Option[int] From e47a6327450f0b14d2f4a62baaba661751c0a0dd Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 31 Jan 2024 19:23:19 -0800 Subject: [PATCH 04/13] lnwallet/chancloser: add state transitions for new protofsm rbf closer In this commit, we add the state transitions for the new protofsm based RBF chan closer. The underlying protocol is a new asymmetric co-op close process, wherein either side can initiate a chan closer, and use their settled funds to pay for fees within the channel. --- lnwallet/chancloser/chancloser.go | 12 +- lnwallet/chancloser/chancloser_test.go | 4 +- lnwallet/chancloser/rbf_coop_states.go | 5 + lnwallet/chancloser/rbf_coop_transitions.go | 1048 +++++++++++++++++++ 4 files changed, 1057 insertions(+), 12 deletions(-) create mode 100644 lnwallet/chancloser/rbf_coop_transitions.go diff --git a/lnwallet/chancloser/chancloser.go b/lnwallet/chancloser/chancloser.go index 398a8a9f3e..5081f0bebd 100644 --- a/lnwallet/chancloser/chancloser.go +++ b/lnwallet/chancloser/chancloser.go @@ -539,8 +539,8 @@ func (c *ChanCloser) AuxOutputs() fn.Option[AuxCloseOutputs] { // upfront script is set, we check whether it matches the script provided by // our peer. If they do not match, we use the disconnect function provided to // disconnect from the peer. -func validateShutdownScript(disconnect func() error, upfrontScript, - peerScript lnwire.DeliveryAddress, netParams *chaincfg.Params) error { +func validateShutdownScript(upfrontScript, peerScript lnwire.DeliveryAddress, + netParams *chaincfg.Params) error { // Either way, we'll make sure that the script passed meets our // standards. 
The upfrontScript should have already been checked at an @@ -568,12 +568,6 @@ func validateShutdownScript(disconnect func() error, upfrontScript, chancloserLog.Warnf("peer's script: %x does not match upfront "+ "shutdown script: %x", peerScript, upfrontScript) - // Disconnect from the peer because they have violated option upfront - // shutdown. - if err := disconnect(); err != nil { - return err - } - return ErrUpfrontShutdownScriptMismatch } @@ -630,7 +624,6 @@ func (c *ChanCloser) ReceiveShutdown(msg lnwire.Shutdown) ( // If the remote node opened the channel with option upfront // shutdown script, check that the script they provided matches. if err := validateShutdownScript( - c.cfg.Disconnect, c.cfg.Channel.RemoteUpfrontShutdownScript(), msg.Address, c.cfg.ChainParams, ); err != nil { @@ -681,7 +674,6 @@ func (c *ChanCloser) ReceiveShutdown(msg lnwire.Shutdown) ( // If the remote node opened the channel with option upfront // shutdown script, check that the script they provided matches. if err := validateShutdownScript( - c.cfg.Disconnect, c.cfg.Channel.RemoteUpfrontShutdownScript(), msg.Address, c.cfg.ChainParams, ); err != nil { diff --git a/lnwallet/chancloser/chancloser_test.go b/lnwallet/chancloser/chancloser_test.go index fe71fe5e3b..d0211414c5 100644 --- a/lnwallet/chancloser/chancloser_test.go +++ b/lnwallet/chancloser/chancloser_test.go @@ -129,8 +129,8 @@ func TestMaybeMatchScript(t *testing.T) { t.Parallel() err := validateShutdownScript( - func() error { return nil }, test.upfrontScript, - test.shutdownScript, &chaincfg.SimNetParams, + test.upfrontScript, test.shutdownScript, + &chaincfg.SimNetParams, ) if err != test.expectedErr { diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go index 764cdecb20..613e7c690d 100644 --- a/lnwallet/chancloser/rbf_coop_states.go +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -250,6 +250,11 @@ type ChanStateObserver interface { // new outgoing add messages. DisableOutgoingAdds() error + // DisableChannel attempts to disable a channel (marking it ineligible + // to forward), and also sends out a network update to disable the + // channel. + DisableChannel() error + // MarkCoopBroadcasted persistently marks that the channel close // transaction has been broadcast. MarkCoopBroadcasted(*wire.MsgTx, bool) error diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go new file mode 100644 index 0000000000..b3a038f38a --- /dev/null +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -0,0 +1,1048 @@ +package chancloser + +import ( + "fmt" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/mempool" + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/labels" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnutils" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/protofsm" + "github.com/lightningnetwork/lnd/tlv" +) + +// sendShutdownEvents is a helper function that returns a set of daemon events +// we need to emit when we decide that we should send a shutdown message. We'll +// also mark the channel as borked as well, as at this point, we no longer want +// to continue with normal operation. 
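+//
+// The returned SendMsgEvent carries a SendWhen predicate, so the daemon only
+// releases the shutdown message once the channel has no dangling updates.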
+func sendShutdownEvents(chanID lnwire.ChannelID, chanPoint wire.OutPoint, + deliveryAddr lnwire.DeliveryAddress, peerPub btcec.PublicKey, + postSendEvent fn.Option[ProtocolEvent], + chanState ChanStateObserver) (protofsm.DaemonEventSet, error) { + + // We'll emit a daemon event that instructs the daemon to send out a + // new shutdown message to the remote peer. + msgsToSend := &protofsm.SendMsgEvent[ProtocolEvent]{ + TargetPeer: peerPub, + Msgs: []lnwire.Message{&lnwire.Shutdown{ + ChannelID: chanID, + Address: deliveryAddr, + }}, + SendWhen: fn.Some(func() bool { + ok := chanState.NoDanglingUpdates() + if ok { + chancloserLog.Infof("ChannelPoint(%v): no "+ + "dangling updates sending shutdown "+ + "message", chanPoint) + } + + return ok + }), + PostSendEvent: postSendEvent, + } + + // If a close is already in process (we're in the RBF loop), then we + // can skip everything below, and just send out the shutdown message. + if chanState.FinalBalances().IsSome() { + return protofsm.DaemonEventSet{msgsToSend}, nil + } + + // Before closing, we'll attempt to send a disable update for the + // channel. We do so before closing the channel as otherwise the + // current edge policy won't be retrievable from the graph. + if err := chanState.DisableChannel(); err != nil { + return nil, fmt.Errorf("unable to disable channel: %w", err) + } + + // If we have a post-send event, then this means that we're the + // responder. We'll use this fact below to update state in the DB. + isInitiator := postSendEvent.IsNone() + + chancloserLog.Infof("ChannelPoint(%v): disabling outgoing adds", + chanPoint) + + // As we're about to send a shutdown, we'll disable adds in the + // outgoing direction. + if err := chanState.DisableOutgoingAdds(); err != nil { + return nil, fmt.Errorf("unable to disable outgoing "+ + "adds: %w", err) + } + + // To be able to survive a restart, we'll also write to disk + // information about the shutdown we're about to send out. + err := chanState.MarkShutdownSent(deliveryAddr, isInitiator) + if err != nil { + return nil, fmt.Errorf("unable to mark shutdown sent: %w", err) + } + + chancloserLog.Debugf("ChannelPoint(%v): marking channel as borked", + chanPoint) + + return protofsm.DaemonEventSet{msgsToSend}, nil +} + +// validateShutdown is a helper function that validates that the shutdown has a +// proper delivery script, and can be sent based on the current thaw height of +// the channel. +func validateShutdown(chanThawHeight fn.Option[uint32], + upfrontAddr fn.Option[lnwire.DeliveryAddress], + msg *ShutdownReceived, chanPoint wire.OutPoint, + chainParams chaincfg.Params) error { + + // If we've received a shutdown message, and we have a thaw height, + // then we need to make sure that the channel can now be co-op closed. + err := fn.MapOptionZ(chanThawHeight, func(thawHeight uint32) error { + // If the current height is below the thaw height, then we'll + // reject the shutdown message as we can't yet co-op close the + // channel. + if msg.BlockHeight < thawHeight { + return fmt.Errorf("initiator attempting to "+ + "co-op close frozen ChannelPoint(%v) "+ + "(current_height=%v, thaw_height=%v)", + chanPoint, msg.BlockHeight, + thawHeight) + } + + return nil + }) + if err != nil { + return err + } + + // Next, we'll verify that the remote party is sending the expected + // shutdown script. 
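+	// If no upfront shutdown address was ever committed to, the option
+	// below is None, and UnwrapOr(nil) results in no error.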
+	return fn.MapOption(func(addr lnwire.DeliveryAddress) error {
+		return validateShutdownScript(
+			addr, msg.ShutdownScript, &chainParams,
+		)
+	})(upfrontAddr).UnwrapOr(nil)
+}
+
+// ProcessEvent takes a protocol event, and implements a state transition for
+// the state. From this state, we can receive two possible incoming events:
+// SendShutdown and ShutdownReceived. Both of these will transition us to the
+// ShutdownPending state.
+func (c *ChannelActive) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) {
+	// If we get a confirmation, then a prior transaction we broadcasted
+	// has confirmed, so we can move to our terminal state early.
+	case *SpendEvent:
+		return &CloseStateTransition{
+			NextState: &CloseFin{
+				ConfirmedTx: msg.Tx,
+			},
+		}, nil
+
+	// If we receive the SendShutdown event, then we'll send our shutdown
+	// with a special SendPredicate, then go to the ShutdownPending state
+	// where we'll wait for the remote to send their shutdown.
+	case *SendShutdown:
+		// If we have an upfront shutdown addr or a delivery addr then
+		// we'll use that. Otherwise, we'll generate a new delivery
+		// addr.
+		shutdownScript, err := env.LocalUpfrontShutdown.Alt(
+			msg.DeliveryAddr,
+		).UnwrapOrFuncErr(env.NewDeliveryScript)
+		if err != nil {
+			return nil, err
+		}
+
+		// We'll emit some daemon events to send the shutdown message
+		// and disable the channel on the network level. In this case,
+		// we don't need a post send event as receiving their shutdown
+		// is what'll move us beyond the ShutdownPending state.
+		daemonEvents, err := sendShutdownEvents(
+			env.ChanID, env.ChanPoint, shutdownScript,
+			env.ChanPeer, fn.None[ProtocolEvent](),
+			env.ChanObserver,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg, "+
+			"delivery_script=%v", env.ChanPoint, shutdownScript)
+
+		// From here, we'll transition to the shutdown pending state. In
+		// this state we await their shutdown message (self loop), then
+		// also the flushing event.
+		return &CloseStateTransition{
+			NextState: &ShutdownPending{
+				IdealFeeRate: fn.Some(msg.IdealFeeRate),
+				ShutdownScripts: ShutdownScripts{
+					LocalDeliveryScript: shutdownScript,
+				},
+			},
+			NewEvents: fn.Some(RbfEvent{
+				ExternalEvents: daemonEvents,
+			}),
+		}, nil
+
+	// When we receive a shutdown from the remote party, we'll validate the
+	// shutdown message, then transition to the ShutdownPending state. We'll
+	// also emit similar events to the above to send our shutdown, and
+	// also disable the channel.
+	case *ShutdownReceived:
+		chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
+			env.ChanPoint)
+
+		// Validate that they can send the message now, and also that
+		// they haven't violated their commitment to a prior upfront
+		// shutdown addr.
+		err := validateShutdown(
+			env.ThawHeight, env.RemoteUpfrontShutdown, msg,
+			env.ChanPoint, env.ChainParams,
+		)
+		if err != nil {
+			chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
+				"shutdown attempt: %v", env.ChanPoint, err)
+
+			return nil, err
+		}
+
+		// If we have an upfront shutdown addr we'll use that,
+		// otherwise, we'll generate a new delivery script. 
+		shutdownAddr, err := env.LocalUpfrontShutdown.UnwrapOrFuncErr(
+			env.NewDeliveryScript,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): sending shutdown msg "+
+			"at next clean commit state", env.ChanPoint)
+
+		// Now that we know the shutdown message is valid, we'll obtain
+		// the set of daemon events we need to emit. We'll also specify
+		// that once the message has actually been sent, we generate an
+		// input event of ShutdownComplete.
+		daemonEvents, err := sendShutdownEvents(
+			env.ChanID, env.ChanPoint, shutdownAddr,
+			env.ChanPeer,
+			fn.Some[ProtocolEvent](&ShutdownComplete{}),
+			env.ChanObserver,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
+			env.ChanPoint)
+
+		// We just received a shutdown, so we'll disable the adds in
+		// the incoming direction.
+		if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
+			return nil, fmt.Errorf("unable to disable incoming "+
+				"adds: %w", err)
+		}
+
+		remoteAddr := msg.ShutdownScript
+
+		return &CloseStateTransition{
+			NextState: &ShutdownPending{
+				ShutdownScripts: ShutdownScripts{
+					LocalDeliveryScript:  shutdownAddr,
+					RemoteDeliveryScript: remoteAddr,
+				},
+			},
+			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
+				ExternalEvents: daemonEvents,
+			}),
+		}, nil
+
+	// Any other messages in this state will result in an error, as this is
+	// an undefined state transition.
+	default:
+		return nil, fmt.Errorf("%w: received %T while in ChannelActive",
+			ErrInvalidStateTransition, msg)
+	}
+}
+
+// ProcessEvent takes a protocol event, and implements a state transition for
+// the state. Our path to this state will determine the set of valid events. If
+// we were the one that sent the shutdown, then we'll just wait on the
+// ShutdownReceived event. Otherwise, we received the shutdown, and can move
+// forward once we receive the ShutdownComplete event. Receiving
+// ShutdownComplete means that we've sent our shutdown, as this was specified
+// as a post send event.
+func (s *ShutdownPending) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) {
+	// If we get a confirmation, then a prior transaction we broadcasted
+	// has confirmed, so we can move to our terminal state early.
+	case *SpendEvent:
+		return &CloseStateTransition{
+			NextState: &CloseFin{
+				ConfirmedTx: msg.Tx,
+			},
+		}, nil
+
+	// When we receive a shutdown from the remote party, we'll validate the
+	// shutdown message, then transition to the ChannelFlushing state.
+	case *ShutdownReceived:
+		chancloserLog.Infof("ChannelPoint(%v): received shutdown msg",
+			env.ChanPoint)
+
+		// Validate that they can send the message now, and also that
+		// they haven't violated their commitment to a prior upfront
+		// shutdown addr.
+		err := validateShutdown(
+			env.ThawHeight, env.RemoteUpfrontShutdown, msg,
+			env.ChanPoint, env.ChainParams,
+		)
+		if err != nil {
+			chancloserLog.Errorf("ChannelPoint(%v): rejecting "+
+				"shutdown attempt: %v", env.ChanPoint, err)
+
+			return nil, err
+		}
+
+		// If the channel is *already* flushed, and the close is
+		// already in progress, then we can skip the flushing state and
+		// go straight into negotiation, as this is the RBF loop. 
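+		// We short-circuit by self-injecting a ChannelFlushed event
+		// carrying the already-known final balances.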
+		var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
+		finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
+			unknownBalance,
+		)
+		if finalBalances != unknownBalance {
+			channelFlushed := ProtocolEvent(&ChannelFlushed{
+				ShutdownBalances: finalBalances,
+			})
+			eventsToEmit = fn.Some(RbfEvent{
+				InternalEvent: []ProtocolEvent{
+					channelFlushed,
+				},
+			})
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): disabling incoming adds",
+			env.ChanPoint)
+
+		// We just received a shutdown, so we'll disable the adds in
+		// the incoming direction.
+		if err := env.ChanObserver.DisableIncomingAdds(); err != nil {
+			return nil, fmt.Errorf("unable to disable incoming "+
+				"adds: %w", err)
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
+			"be flushed...", env.ChanPoint)
+
+		// We transition to the ChannelFlushing state, where we await
+		// the ChannelFlushed event.
+		return &CloseStateTransition{
+			NextState: &ChannelFlushing{
+				IdealFeeRate: s.IdealFeeRate,
+				ShutdownScripts: ShutdownScripts{
+					LocalDeliveryScript: s.LocalDeliveryScript, //nolint:ll
+					RemoteDeliveryScript: msg.ShutdownScript, //nolint:ll
+				},
+			},
+			NewEvents: eventsToEmit,
+		}, nil
+
+	// If we get this message, then this means that we were finally able to
+	// send out shutdown after receiving it from the remote party. We'll
+	// now transition directly to the ChannelFlushing state.
+	case *ShutdownComplete:
+		chancloserLog.Infof("ChannelPoint(%v): waiting for channel to "+
+			"be flushed...", env.ChanPoint)
+
+		// If the channel is *already* flushed, and the close is
+		// already in progress, then we can skip the flushing state and
+		// go straight into negotiation, as this is the RBF loop.
+		var eventsToEmit fn.Option[protofsm.EmittedEvent[ProtocolEvent]]
+		finalBalances := env.ChanObserver.FinalBalances().UnwrapOr(
+			unknownBalance,
+		)
+		if finalBalances != unknownBalance {
+			channelFlushed := ProtocolEvent(&ChannelFlushed{
+				ShutdownBalances: finalBalances,
+			})
+			eventsToEmit = fn.Some(RbfEvent{
+				InternalEvent: []ProtocolEvent{
+					channelFlushed,
+				},
+			})
+		}
+
+		// From here, we'll transition to the channel flushing state.
+		// We'll stay here until we receive the ChannelFlushed event.
+		return &CloseStateTransition{
+			NextState: &ChannelFlushing{
+				IdealFeeRate:    s.IdealFeeRate,
+				ShutdownScripts: s.ShutdownScripts,
+			},
+			NewEvents: eventsToEmit,
+		}, nil
+
+	// Any other messages in this state will result in an error, as this is
+	// an undefined state transition.
+	default:
+		return nil, fmt.Errorf("%w: received %T while in "+
+			"ShutdownPending", ErrInvalidStateTransition, msg)
+	}
+}
+
+// ProcessEvent takes a new protocol event, and figures out if we can
+// transition to the next state, or just loop back upon ourself. If we receive
+// an OfferReceivedEvent, then we'll stash it and stay in the ChannelFlushing
+// state, as we haven't yet fully cleared the channel. Otherwise, we can move
+// to the ClosingNegotiation state, which'll begin the channel closing process.
+func (c *ChannelFlushing) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) {
+	// If we get a confirmation, then a prior transaction we broadcasted
+	// has confirmed, so we can move to our terminal state early.
+	case *SpendEvent:
+		return &CloseStateTransition{
+			NextState: &CloseFin{
+				ConfirmedTx: msg.Tx,
+			},
+		}, nil
+
+	// If we get an OfferReceived event, then the channel is flushed from
+	// the PoV of the remote party. 
However, due to propagation delay or
+	// concurrency, we may not have received the ChannelFlushed event yet.
+	// In this case, we'll stash the event and wait for the ChannelFlushed
+	// event.
+	case *OfferReceivedEvent:
+		chancloserLog.Infof("ChannelPoint(%v): received remote offer "+
+			"early, stashing...", env.ChanPoint)
+
+		c.EarlyRemoteOffer = fn.Some(*msg)
+
+		// TODO(roasbeef): unit test!
+		//  * actually do this ^
+
+		// We'll perform a noop update so we can wait for the actual
+		// channel flushed event.
+		return &CloseStateTransition{
+			NextState: c,
+		}, nil
+
+	// If we receive the ChannelFlushed event, then the coast is clear so
+	// we'll now morph into the dual peer state so we can handle any
+	// messages needed to drive forward the close process.
+	case *ChannelFlushed:
+		// Both the local and remote closing negotiation need the terms
+		// we'll be using to close the channel, so we'll create them
+		// here.
+		closeTerms := CloseChannelTerms{
+			ShutdownScripts:  c.ShutdownScripts,
+			ShutdownBalances: msg.ShutdownBalances,
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): channel flushed! "+
+			"proceeding with co-op close", env.ChanPoint)
+
+		// Now that the channel has been flushed, we'll mark on disk
+		// that we're approaching the point of no return where we'll
+		// send a new signature to the remote party.
+		//
+		// TODO(roasbeef): doesn't actually matter if initiator here?
+		if msg.FreshFlush {
+			err := env.ChanObserver.MarkCoopBroadcasted(nil, true)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// If an ideal fee rate was specified, then we'll use that,
+		// otherwise we'll fall back to the default value given in the
+		// env.
+		idealFeeRate := c.IdealFeeRate.UnwrapOr(env.DefaultFeeRate)
+
+		// We'll then use that fee rate to determine the absolute fee
+		// we'd propose.
+		//
+		// TODO(roasbeef): need to sign the 3 diff versions of this?
+		localTxOut, remoteTxOut := closeTerms.DeriveCloseTxOuts()
+		absoluteFee := env.FeeEstimator.EstimateFee(
+			env.ChanType, localTxOut, remoteTxOut,
+			idealFeeRate.FeePerKWeight(),
+		)
+
+		chancloserLog.Infof("ChannelPoint(%v): using ideal_fee=%v, "+
+			"absolute_fee=%v", env.ChanPoint, idealFeeRate,
+			absoluteFee)
+
+		var (
+			internalEvents []ProtocolEvent
+			newEvents      fn.Option[RbfEvent]
+		)
+
+		// If we received a remote offer early from the remote party,
+		// then we'll add that to the set of internal events to emit.
+		c.EarlyRemoteOffer.WhenSome(func(offer OfferReceivedEvent) {
+			internalEvents = append(internalEvents, &offer)
+		})
+
+		// Only if we have enough funds to pay for the fees do we need
+		// to emit a localOfferSign event.
+		//
+		// TODO(roasbeef): also only proceed if was higher than fee in
+		// last round?
+		if closeTerms.LocalCanPayFees(absoluteFee) {
+			// Each time we go into this negotiation flow, we'll
+			// kick off our local state with a new close attempt.
+			// So we'll emit an internal event to drive forward that
+			// part of the state. 
+			localOfferSign := ProtocolEvent(&SendOfferEvent{
+				TargetFeeRate: idealFeeRate,
+			})
+			internalEvents = append(internalEvents, localOfferSign)
+		} else {
+			chancloserLog.Infof("ChannelPoint(%v): unable to pay "+
+				"fees with local balance, skipping "+
+				"closing_complete", env.ChanPoint)
+		}
+
+		if len(internalEvents) > 0 {
+			newEvents = fn.Some(RbfEvent{
+				InternalEvent: internalEvents,
+			})
+		}
+
+		return &CloseStateTransition{
+			NextState: &ClosingNegotiation{
+				PeerState: lntypes.Dual[AsymmetricPeerState]{
+					Local: &LocalCloseStart{
+						CloseChannelTerms: closeTerms,
+					},
+					Remote: &RemoteCloseStart{
+						CloseChannelTerms: closeTerms,
+					},
+				},
+			},
+			NewEvents: newEvents,
+		}, nil
+
+	default:
+		return nil, fmt.Errorf("%w: received %T while in "+
+			"ChannelFlushing", ErrInvalidStateTransition, msg)
+	}
+}
+
+// processNegotiateEvent is a helper function that processes a new event
+// against the target peer's channel state once we're in the
+// ClosingNegotiation state.
+func processNegotiateEvent(c *ClosingNegotiation, event ProtocolEvent,
+	env *Environment, chanPeer lntypes.ChannelParty,
+) (*CloseStateTransition, error) {

+	targetPeerState := c.PeerState.GetForParty(chanPeer)
+
+	// Drive forward the target peer state based on the next event.
+	transition, err := targetPeerState.ProcessEvent(
+		event, env,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	nextPeerState, ok := transition.NextState.(AsymmetricPeerState) //nolint:ll
+	if !ok {
+		return nil, fmt.Errorf("expected %T to be "+
+			"AsymmetricPeerState", transition.NextState)
+	}
+
+	// Make a copy of the input state, then update the peer state of the
+	// proper party.
+	newPeerState := *c
+	newPeerState.PeerState.SetForParty(chanPeer, nextPeerState)
+
+	return &CloseStateTransition{
+		NextState: &newPeerState,
+		NewEvents: transition.NewEvents,
+	}, nil
+}
+
+// ProcessEvent drives forward the composite states for the local and remote
+// party in response to new events. From this state, we'll continue to drive
+// forward the local and remote states until we arrive at the CloseFin state,
+// or we loop back up to the ChannelActive state to restart the process.
+func (c *ClosingNegotiation) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	// There are two classes of events that can break us out of this state:
+	// we receive a confirmation event, or we receive a signal to restart
+	// the co-op close process.
+	switch msg := event.(type) {
+	// If we get a confirmation, then the spend request we issued when we
+	// were leaving the ChannelFlushing state has been confirmed. We'll
+	// now transition to the CloseFin state.
+	case *SpendEvent:
+		return &CloseStateTransition{
+			NextState: &CloseFin{
+				ConfirmedTx: msg.Tx,
+			},
+		}, nil
+
+	// Otherwise, if we receive a shutdown, or receive an event to send a
+	// shutdown, then we'll go back up to the ChannelActive state, and have
+	// it handle this event by emitting an internal event.
+	//
+	// TODO(roasbeef): both will have fee rate specified, so ok?
+	case *ShutdownReceived, *SendShutdown:
+		chancloserLog.Infof("ChannelPoint(%v): RBF case triggered, "+
+			"restarting negotiation", env.ChanPoint)
+
+		return &CloseStateTransition{
+			NextState: &ChannelActive{},
+			NewEvents: fn.Some(RbfEvent{
+				InternalEvent: []ProtocolEvent{event},
+			}),
+		}, nil
+	}
+
+	// If we get to this point, then we have an event that'll drive forward
+	// the negotiation process. Based on the event, we'll figure out which
+	// state we'll be modifying. 
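+	// For instance, a SendOfferEvent or LocalSigReceived is routed to the
+	// local close state, while an OfferReceivedEvent is routed to the
+	// remote close state.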
+	switch {
+	case c.PeerState.GetForParty(lntypes.Local).ShouldRouteTo(event):
+		chancloserLog.Infof("ChannelPoint(%v): routing %T to local "+
+			"chan state", env.ChanPoint, event)
+
+		// Drive forward the local state based on the next event.
+		return processNegotiateEvent(c, event, env, lntypes.Local)
+
+	case c.PeerState.GetForParty(lntypes.Remote).ShouldRouteTo(event):
+		chancloserLog.Infof("ChannelPoint(%v): routing %T to remote "+
+			"chan state", env.ChanPoint, event)
+
+		// Drive forward the remote state based on the next event.
+		return processNegotiateEvent(c, event, env, lntypes.Remote)
+	}
+
+	return nil, fmt.Errorf("%w: received %T while in ClosingNegotiation",
+		ErrInvalidStateTransition, event)
+}
+
+// newSigTlv is a helper function that returns a new optional TLV sig field for
+// the parametrized tlv.TlvType value.
+func newSigTlv[T tlv.TlvType](s lnwire.Sig) tlv.OptionalRecordT[T, lnwire.Sig] {
+	return tlv.SomeRecordT(tlv.NewRecordT[T](s))
+}
+
+// ProcessEvent implements the event processing to kick off the process of
+// obtaining a new (possibly RBF'd) signature for our co-op close transaction.
+func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) { //nolint:gocritic
+	// If we receive a SendOfferEvent, then we'll use the specified ideal
+	// fee rate to generate a new signature over the closing transaction.
+	case *SendOfferEvent:
+		// First, we'll figure out the absolute fee we should pay
+		// given the state of the local/remote outputs.
+		localTxOut, remoteTxOut := l.DeriveCloseTxOuts()
+		absoluteFee := env.FeeEstimator.EstimateFee(
+			env.ChanType, localTxOut, remoteTxOut,
+			msg.TargetFeeRate.FeePerKWeight(),
+		)
+
+		// Now that we know what fee we want to pay, we'll create a new
+		// signature over our co-op close transaction. For our
+		// proposals, we'll just always use the known RBF sequence
+		// value.
+		localScript := l.LocalDeliveryScript
+		rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll
+			absoluteFee, localScript, l.RemoteDeliveryScript,
+			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
+		)
+		if err != nil {
+			return nil, err
+		}
+		wireSig, err := lnwire.NewSigFromSignature(rawSig)
+		if err != nil {
+			return nil, err
+		}
+
+		chancloserLog.Infof("closing w/ local_addr=%x, "+
+			"remote_addr=%x, fee=%v", localScript[:],
+			l.RemoteDeliveryScript[:], absoluteFee)
+
+		chancloserLog.Infof("proposing closing_tx=%v",
+			spew.Sdump(closeTx))
+
+		// Now that we have our signature, we'll set the proper
+		// closingSigs field based on if the remote party's output is
+		// dust or not.
+		var closingSigs lnwire.ClosingSigs
+		switch {
+		// If the remote party's output is dust, then we'll set the
+		// CloserNoClosee field.
+		case remoteTxOut == nil:
+			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
+				wireSig,
+			)
+
+		// If after paying for fees, our balance is below dust, then
+		// we'll set the NoCloserClosee field.
+		case closeBalance < lnwallet.DustLimitForSize(len(localScript)):
+			closingSigs.NoCloserClosee = newSigTlv[tlv.TlvType2](
+				wireSig,
+			)
+
+		// Otherwise, we'll set the CloserAndClosee field.
+		//
+		// TODO(roasbeef): should actually set both??
+		default:
+			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
+				wireSig,
+			)
+		}
+
+		// Now that we have our sig, we'll emit a daemon event to send
+		// it to the remote party, then transition to the
+		// LocalOfferSent state.
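+		//
+		// Note that mempool.MaxRBFSequence keeps the input's
+		// nSequence just below the final value, so the resulting
+		// transaction still signals replaceability under BIP 125,
+		// leaving the door open for future fee bumps.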
+		//
+		// TODO(roasbeef): type alias for protocol event
+		sendEvent := protofsm.DaemonEventSet{&protofsm.SendMsgEvent[ProtocolEvent]{ //nolint:ll
+			TargetPeer: env.ChanPeer,
+			// TODO(roasbeef): new func
+			Msgs: []lnwire.Message{&lnwire.ClosingComplete{
+				ChannelID:   env.ChanID,
+				FeeSatoshis: absoluteFee,
+				Sequence:    mempool.MaxRBFSequence,
+				ClosingSigs: closingSigs,
+			}},
+		}}
+
+		chancloserLog.Infof("ChannelPoint(%v): sending closing sig "+
+			"to remote party, fee_sats=%v", env.ChanPoint,
+			absoluteFee)
+
+		return &CloseStateTransition{
+			NextState: &LocalOfferSent{
+				ProposedFee:       absoluteFee,
+				LocalSig:          wireSig,
+				CloseChannelTerms: l.CloseChannelTerms,
+			},
+			NewEvents: fn.Some(RbfEvent{
+				ExternalEvents: sendEvent,
+			}),
+		}, nil
+	}
+
+	return nil, fmt.Errorf("%w: received %T while in LocalCloseStart",
+		ErrInvalidStateTransition, event)
+}
+
+// extractSig extracts the expected signature from the closing sig message.
+// Only one of the sig fields should actually be populated: as the ClosingSig
+// message is sent in response to a ClosingComplete message, it should only
+// sign the same version of the co-op close tx as the sender did.
+func extractSig(msg lnwire.ClosingSig) fn.Result[lnwire.Sig] {
+	// First, we'll validate that only one signature is included in their
+	// response to our initial offer. If not, then we'll exit here, and
+	// trigger a recycle of the connection.
+	sigInts := []bool{
+		msg.CloserNoClosee.IsSome(), msg.NoCloserClosee.IsSome(),
+		msg.CloserAndClosee.IsSome(),
+	}
+	numSigs := fn.Foldl(0, sigInts, func(acc int, sigInt bool) int {
+		if sigInt {
+			return acc + 1
+		}
+
+		return acc
+	})
+	if numSigs != 1 {
+		return fn.Errf[lnwire.Sig]("%w: only one sig should be set, "+
+			"got %v", ErrTooManySigs, numSigs)
+	}
+
+	// The final sig is the one that's actually set.
+	sig := msg.CloserAndClosee.ValOpt().Alt(
+		msg.NoCloserClosee.ValOpt(),
+	).Alt(
+		msg.CloserNoClosee.ValOpt(),
+	)
+
+	return fn.NewResult(sig.UnwrapOrErr(ErrNoSig))
+}
+
+// ProcessEvent implements the state transition function for the
+// LocalOfferSent state. In this state, we'll wait for the remote party to
+// send a closing_sig message which gives us the ability to broadcast a new
+// co-op close transaction.
+func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) { //nolint:gocritic
+	// If we receive a LocalSigReceived event, then we'll attempt to
+	// validate the signature from the remote party. If valid, then we can
+	// broadcast the transaction, and transition to the ClosePending state.
+	case *LocalSigReceived:
+		// Extract and validate that only one sig field is set.
+		sig, err := extractSig(msg.SigMsg).Unpack()
+		if err != nil {
+			return nil, err
+		}
+
+		remoteSig, err := sig.ToSignature()
+		if err != nil {
+			return nil, err
+		}
+		localSig, err := l.LocalSig.ToSignature()
+		if err != nil {
+			return nil, err
+		}
+
+		// Now that we have their signature, we'll attempt to validate
+		// it, then extract a valid closing signature from it.
+		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
+			localSig, remoteSig, l.LocalDeliveryScript,
+			l.RemoteDeliveryScript, l.ProposedFee,
+			lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		// As we're about to broadcast a new version of the co-op close
+		// transaction, we'll mark again as broadcast, but with this
+		// variant of the co-op close tx.
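+		//
+		// Persisting the transaction before handing it off for
+		// broadcast means that on restart we can resume from the
+		// latest (highest fee) co-op close variant rather than an
+		// earlier one.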
+		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, true)
+		if err != nil {
+			return nil, err
+		}
+
+		broadcastEvent := protofsm.DaemonEventSet{&protofsm.BroadcastTxn{ //nolint:ll
+			Tx: closeTx,
+			Label: labels.MakeLabel(
+				labels.LabelTypeChannelClose, &env.Scid,
+			),
+		}}
+
+		chancloserLog.Infof("ChannelPoint(%v): received sig from "+
+			"remote party, broadcasting: tx=%v", env.ChanPoint,
+			lnutils.SpewLogClosure(closeTx),
+		)
+
+		return &CloseStateTransition{
+			NextState: &ClosePending{
+				CloseTx: closeTx,
+			},
+			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
+				ExternalEvents: broadcastEvent,
+			}),
+		}, nil
+	}
+
+	return nil, fmt.Errorf("%w: received %T while in LocalOfferSent",
+		ErrInvalidStateTransition, event)
+}
+
+// ProcessEvent implements the state transition function for the
+// RemoteCloseStart. In this state, we'll wait for the remote party to send a
+// closing_complete message. Assuming they can pay for the fees, we'll sign it
+// ourselves, then transition to the next state of ClosePending.
+func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) { //nolint:gocritic
+	// If we receive an OfferReceivedEvent, we'll make sure they can
+	// actually pay for the fee. If so, then we'll counter-sign and
+	// transition to a terminal state.
+	case *OfferReceivedEvent:
+		// To start, we'll perform some basic validation of the sig
+		// message they've sent. We'll validate that the remote party
+		// actually has enough balance to pay the closing fees.
+		switch {
+		case !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis):
+			return nil, fmt.Errorf("%w: %v vs %v",
+				ErrRemoteCannotPay,
+				msg.SigMsg.FeeSatoshis,
+				l.RemoteBalance.ToSatoshis())
+
+		// The sequence they send can't exceed the max RBF sequence,
+		// as that would prevent RBF.
+		case msg.SigMsg.Sequence > mempool.MaxRBFSequence:
+			return nil, fmt.Errorf("%w: %v", ErrNonFinalSequence,
+				msg.SigMsg.Sequence)
+		}
+
+		// With the basic sanity checks out of the way, we'll now
+		// figure out which signature we'll attempt to sign against.
+		var (
+			remoteSig input.Signature
+			noClosee  bool
+		)
+		switch {
+		// If our balance is dust, then we expect the CloserNoClosee
+		// sig to be set.
+		case l.LocalAmtIsDust():
+			if msg.SigMsg.CloserNoClosee.IsNone() {
+				return nil, ErrCloserNoClosee
+			}
+			msg.SigMsg.CloserNoClosee.WhenSomeV(func(s lnwire.Sig) {
+				remoteSig, _ = s.ToSignature()
+				noClosee = true
+			})
+
+		// Otherwise, we'll assume that CloserAndClosee is set.
+		//
+		// TODO(roasbeef): NoCloserClosee, but makes no sense?
+		default:
+			if msg.SigMsg.CloserAndClosee.IsNone() {
+				return nil, ErrCloserAndClosee
+			}
+			msg.SigMsg.CloserAndClosee.WhenSomeV(func(s lnwire.Sig) { //nolint:ll
+				remoteSig, _ = s.ToSignature()
+			})
+		}
+
+		chanOpts := []lnwallet.ChanCloseOpt{
+			lnwallet.WithCustomSequence(msg.SigMsg.Sequence),
+		}
+
+		chancloserLog.Infof("responding to close w/ local_addr=%x, "+
+			"remote_addr=%x, fee=%v",
+			l.LocalDeliveryScript[:], l.RemoteDeliveryScript[:],
+			msg.SigMsg.FeeSatoshis)
+
+		// Now that we have the remote sig, we'll sign the version they
+		// signed, then attempt to complete the cooperative close
+		// process.
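+		//
+		// We reuse their fee and sequence (via chanOpts) so that both
+		// parties sign the exact same transaction; otherwise the
+		// combined signatures below wouldn't validate.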
+		//
+		// TODO(roasbeef): need to be able to omit an output when
+		// signing based on the above, as closing opt
+		rawSig, _, _, err := env.CloseSigner.CreateCloseProposal(
+			msg.SigMsg.FeeSatoshis, l.LocalDeliveryScript,
+			l.RemoteDeliveryScript, chanOpts...,
+		)
+		if err != nil {
+			return nil, err
+		}
+		wireSig, err := lnwire.NewSigFromSignature(rawSig)
+		if err != nil {
+			return nil, err
+		}
+
+		localSig, err := wireSig.ToSignature()
+		if err != nil {
+			return nil, err
+		}
+
+		// With our signature created, we'll now attempt to finalize the
+		// close process.
+		closeTx, _, err := env.CloseSigner.CompleteCooperativeClose(
+			localSig, remoteSig, l.LocalDeliveryScript,
+			l.RemoteDeliveryScript, msg.SigMsg.FeeSatoshis,
+			chanOpts...,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		chancloserLog.Infof("ChannelPoint(%v): received sig (fee=%v "+
+			"sats) from remote party, signing new tx=%v",
+			env.ChanPoint, msg.SigMsg.FeeSatoshis,
+			lnutils.SpewLogClosure(closeTx),
+		)
+
+		var closingSigs lnwire.ClosingSigs
+		if noClosee {
+			closingSigs.CloserNoClosee = newSigTlv[tlv.TlvType1](
+				wireSig,
+			)
+		} else {
+			closingSigs.CloserAndClosee = newSigTlv[tlv.TlvType3](
+				wireSig,
+			)
+		}
+
+		// As we're about to broadcast a new version of the co-op close
+		// transaction, we'll mark again as broadcast, but with this
+		// variant of the co-op close tx.
+		//
+		// TODO(roasbeef): db will only store one instance, store both?
+		err = env.ChanObserver.MarkCoopBroadcasted(closeTx, false)
+		if err != nil {
+			return nil, err
+		}
+
+		// As we transition, we'll emit two events: one to broadcast
+		// the transaction, and the other to send our ClosingSig
+		// message to the remote party.
+		sendEvent := &protofsm.SendMsgEvent[ProtocolEvent]{
+			TargetPeer: env.ChanPeer,
+			Msgs: []lnwire.Message{&lnwire.ClosingSig{
+				ChannelID:   env.ChanID,
+				ClosingSigs: closingSigs,
+			}},
+		}
+		broadcastEvent := &protofsm.BroadcastTxn{
+			Tx: closeTx,
+			Label: labels.MakeLabel(
+				labels.LabelTypeChannelClose, &env.Scid,
+			),
+		}
+		daemonEvents := protofsm.DaemonEventSet{
+			sendEvent, broadcastEvent,
+		}
+
+		// Now that we've created our signature, we'll transition to
+		// the ClosePending state, emitting the daemon events along
+		// the way.
+		return &CloseStateTransition{
+			NextState: &ClosePending{
+				CloseTx: closeTx,
+			},
+			NewEvents: fn.Some(protofsm.EmittedEvent[ProtocolEvent]{
+				ExternalEvents: daemonEvents,
+			}),
+		}, nil
+	}
+
+	return nil, fmt.Errorf("%w: received %T while in RemoteCloseStart",
+		ErrInvalidStateTransition, event)
+}
+
+// ProcessEvent implements the semi-terminal ClosePending state of the
+// rbf-coop close state machine. In this state, we're waiting for either a
+// confirmation, or for either side to attempt to create a new RBF'd co-op
+// close transaction.
+func (c *ClosePending) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	switch msg := event.(type) {
+	// If we see a spend while waiting for the close, then we'll go to our
+	// terminal state.
+	case *SpendEvent:
+		return &CloseStateTransition{
+			NextState: &CloseFin{
+				ConfirmedTx: msg.Tx,
+			},
+		}, nil
+
+	default:
+		return &CloseStateTransition{
+			NextState: c,
+		}, nil
+	}
+}
+
+// ProcessEvent is the event processing for our terminal state. In this state,
+// we just keep looping back on ourselves.
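+// Any event received here results in a no-op self-transition, as the co-op
+// close transaction has already confirmed by this point.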
+func (c *CloseFin) ProcessEvent(event ProtocolEvent, env *Environment,
+) (*CloseStateTransition, error) {
+
+	return &CloseStateTransition{
+		NextState: c,
+	}, nil
+}

From 6b90254fd3b40a721154168da6c05517cf67b06d Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 31 Jan 2024 19:17:38 -0800
Subject: [PATCH 05/13] lnwallet/chancloser: create a MsgMapper for the
 protofsm rbf close

This'll allow us to treat the state machine as a MsgEndpoint, and have
the readHandler in the peer automatically send new messages to it.
---
 lnwallet/chancloser/rbf_coop_msg_mapper.go | 77 ++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 lnwallet/chancloser/rbf_coop_msg_mapper.go

diff --git a/lnwallet/chancloser/rbf_coop_msg_mapper.go b/lnwallet/chancloser/rbf_coop_msg_mapper.go
new file mode 100644
index 0000000000..96c8556861
--- /dev/null
+++ b/lnwallet/chancloser/rbf_coop_msg_mapper.go
@@ -0,0 +1,77 @@
+package chancloser
+
+import (
+	"github.com/lightningnetwork/lnd/fn/v2"
+	"github.com/lightningnetwork/lnd/lnwire"
+)
+
+// RbfMsgMapper is a struct that implements the MsgMapper interface for the
+// rbf-coop close state machine. This enables the state machine to be used with
+// protofsm.
+type RbfMsgMapper struct {
+	// blockHeight is the height of the block when the co-op close request
+	// was initiated. This is used to validate conditions related to the
+	// thaw height.
+	blockHeight uint32
+
+	// chanID is the channel ID of the channel being closed.
+	chanID lnwire.ChannelID
+}
+
+// NewRbfMsgMapper creates a new RbfMsgMapper instance given the current block
+// height when the co-op close request was initiated.
+func NewRbfMsgMapper(blockHeight uint32,
+	chanID lnwire.ChannelID) *RbfMsgMapper {
+
+	return &RbfMsgMapper{
+		blockHeight: blockHeight,
+		chanID:      chanID,
+	}
+}
+
+// someEvent returns the target type as a protocol event option.
+func someEvent[T ProtocolEvent](m T) fn.Option[ProtocolEvent] {
+	return fn.Some(ProtocolEvent(m))
+}
+
+// isExpectedChanID returns true if the channel ID of the message matches the
+// bound instance.
+func (r *RbfMsgMapper) isExpectedChanID(chanID lnwire.ChannelID) bool {
+	return r.chanID == chanID
+}
+
+// MapMsg maps a wire message into an FSM event. If the message is not
+// mappable, then a None option is returned.
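+//
+// As a rough usage sketch (mirroring how the test harness later in this
+// series wires things up, with blockHeight and chanID supplied by the
+// caller), the mapper is handed to the closer's config so inbound wire
+// messages are translated into protocol events automatically:
+//
+//	msgMapper := NewRbfMsgMapper(blockHeight, chanID)
+//	protoCfg := RbfChanCloserCfg{
+//		MsgMapper: fn.Some[protofsm.MsgMapper[ProtocolEvent]](
+//			msgMapper,
+//		),
+//		// Remaining fields elided.
+//	}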
+func (r *RbfMsgMapper) MapMsg(wireMsg lnwire.Message) fn.Option[ProtocolEvent] { + switch msg := wireMsg.(type) { + case *lnwire.Shutdown: + if !r.isExpectedChanID(msg.ChannelID) { + return fn.None[ProtocolEvent]() + } + + return someEvent(&ShutdownReceived{ + BlockHeight: r.blockHeight, + ShutdownScript: msg.Address, + }) + + case *lnwire.ClosingComplete: + if !r.isExpectedChanID(msg.ChannelID) { + return fn.None[ProtocolEvent]() + } + + return someEvent(&OfferReceivedEvent{ + SigMsg: *msg, + }) + + case *lnwire.ClosingSig: + if !r.isExpectedChanID(msg.ChannelID) { + return fn.None[ProtocolEvent]() + } + + return someEvent(&LocalSigReceived{ + SigMsg: *msg, + }) + } + + return fn.None[ProtocolEvent]() +} From 2decea86d8eca21776fdc74c92cafdf439f50e06 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Wed, 28 Feb 2024 20:29:37 -0600 Subject: [PATCH 06/13] lnwallet/chancloser: add unit tests for new rbf coop close --- lnwallet/chancloser/mock.go | 176 +++ lnwallet/chancloser/rbf_coop_test.go | 1615 ++++++++++++++++++++++++++ 2 files changed, 1791 insertions(+) create mode 100644 lnwallet/chancloser/mock.go create mode 100644 lnwallet/chancloser/rbf_coop_test.go diff --git a/lnwallet/chancloser/mock.go b/lnwallet/chancloser/mock.go new file mode 100644 index 0000000000..c6ea4fba21 --- /dev/null +++ b/lnwallet/chancloser/mock.go @@ -0,0 +1,176 @@ +package chancloser + +import ( + "sync/atomic" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/fn/v2" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/stretchr/testify/mock" +) + +type dummyAdapters struct { + mock.Mock + + msgSent atomic.Bool + + confChan chan *chainntnfs.TxConfirmation + spendChan chan *chainntnfs.SpendDetail +} + +func newDaemonAdapters() *dummyAdapters { + return &dummyAdapters{ + confChan: make(chan *chainntnfs.TxConfirmation, 1), + spendChan: make(chan *chainntnfs.SpendDetail, 1), + } +} + +func (d *dummyAdapters) SendMessages(pub btcec.PublicKey, + msgs []lnwire.Message) error { + + defer d.msgSent.Store(true) + + args := d.Called(pub, msgs) + + return args.Error(0) +} + +func (d *dummyAdapters) BroadcastTransaction(tx *wire.MsgTx, + label string) error { + + args := d.Called(tx, label) + + return args.Error(0) +} + +func (d *dummyAdapters) DisableChannel(op wire.OutPoint) error { + args := d.Called(op) + + return args.Error(0) +} + +func (d *dummyAdapters) RegisterConfirmationsNtfn(txid *chainhash.Hash, + pkScript []byte, numConfs, heightHint uint32, + opts ...chainntnfs.NotifierOption, +) (*chainntnfs.ConfirmationEvent, error) { + + args := d.Called(txid, pkScript, numConfs) + + err := args.Error(0) + + return &chainntnfs.ConfirmationEvent{ + Confirmed: d.confChan, + }, err +} + +func (d *dummyAdapters) RegisterSpendNtfn(outpoint *wire.OutPoint, + pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { + + args := d.Called(outpoint, pkScript, heightHint) + + err := args.Error(0) + + return &chainntnfs.SpendEvent{ + Spend: d.spendChan, + }, err +} + +type mockFeeEstimator struct { + mock.Mock +} + +func (m *mockFeeEstimator) EstimateFee(chanType channeldb.ChannelType, + localTxOut, remoteTxOut *wire.TxOut, + idealFeeRate 
chainfee.SatPerKWeight) btcutil.Amount {
+
+	args := m.Called(chanType, localTxOut, remoteTxOut, idealFeeRate)
+	return args.Get(0).(btcutil.Amount)
+}
+
+type mockChanObserver struct {
+	mock.Mock
+}
+
+func (m *mockChanObserver) NoDanglingUpdates() bool {
+	args := m.Called()
+	return args.Bool(0)
+}
+
+func (m *mockChanObserver) DisableIncomingAdds() error {
+	args := m.Called()
+	return args.Error(0)
+}
+
+func (m *mockChanObserver) DisableOutgoingAdds() error {
+	args := m.Called()
+	return args.Error(0)
+}
+
+func (m *mockChanObserver) MarkCoopBroadcasted(txn *wire.MsgTx,
+	local bool) error {
+
+	args := m.Called(txn, local)
+	return args.Error(0)
+}
+
+func (m *mockChanObserver) MarkShutdownSent(deliveryAddr []byte,
+	isInitiator bool) error {
+
+	args := m.Called(deliveryAddr, isInitiator)
+	return args.Error(0)
+}
+
+func (m *mockChanObserver) FinalBalances() fn.Option[ShutdownBalances] {
+	args := m.Called()
+	return args.Get(0).(fn.Option[ShutdownBalances])
+}
+
+func (m *mockChanObserver) DisableChannel() error {
+	args := m.Called()
+	return args.Error(0)
+}
+
+type mockErrorReporter struct {
+	mock.Mock
+}
+
+func (m *mockErrorReporter) ReportError(err error) {
+	m.Called(err)
+}
+
+type mockCloseSigner struct {
+	mock.Mock
+}
+
+func (m *mockCloseSigner) CreateCloseProposal(fee btcutil.Amount,
+	localScript []byte, remoteScript []byte,
+	closeOpt ...lnwallet.ChanCloseOpt) (
+	input.Signature, *chainhash.Hash, btcutil.Amount, error) {
+
+	args := m.Called(fee, localScript, remoteScript, closeOpt)
+
+	return args.Get(0).(input.Signature), args.Get(1).(*chainhash.Hash),
+		args.Get(2).(btcutil.Amount), args.Error(3)
+}
+
+func (m *mockCloseSigner) CompleteCooperativeClose(localSig,
+	remoteSig input.Signature,
+	localScript, remoteScript []byte,
+	fee btcutil.Amount, closeOpt ...lnwallet.ChanCloseOpt,
+) (*wire.MsgTx, btcutil.Amount, error) {
+
+	args := m.Called(
+		localSig, remoteSig, localScript, remoteScript, fee, closeOpt,
+	)
+
+	return args.Get(0).(*wire.MsgTx), args.Get(1).(btcutil.Amount),
+		args.Error(2)
+}
diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go
new file mode 100644
index 0000000000..1bfd574ba3
--- /dev/null
+++ b/lnwallet/chancloser/rbf_coop_test.go
@@ -0,0 +1,1615 @@
+package chancloser
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/btcsuite/btcd/btcec/v2"
+	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/mempool"
+	"github.com/btcsuite/btcd/txscript"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/lightningnetwork/lnd/fn/v2"
+	"github.com/lightningnetwork/lnd/input"
+	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/lightningnetwork/lnd/lntypes"
+	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/lightningnetwork/lnd/protofsm"
+	"github.com/lightningnetwork/lnd/tlv"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	localAddr = lnwire.DeliveryAddress(append(
+		[]byte{txscript.OP_1, txscript.OP_DATA_32},
+		bytes.Repeat([]byte{0x01}, 32)...,
+	))
+
+	remoteAddr = lnwire.DeliveryAddress(append(
+		[]byte{txscript.OP_1, txscript.OP_DATA_32},
+		bytes.Repeat([]byte{0x02}, 32)...,
+	))
+
+	localSigBytes = fromHex("3045022100cd496f2ab4fe124f977ffe3caa09f757" +
+		"6d8a34156b4e55d326b4dffc0399a094022013500a0510b5094bff220c7" +
+		"4656879b8ca0369d3da78004004c970790862fc03")
+	localSig     = sigMustParse(localSigBytes)
+	localSigWire = mustWireSig(&localSig)
+
+	remoteSigBytes = fromHex("304502210082235e21a2300022738dabb8e1bbd9d1" +
+		"9cfb1e7ab8c30a23b0afbb8d178abcf3022024bf68e256c534ddfaf966b" +
+		"f908deb944305596f7bdcc38d69acad7f9c868724")
+	remoteSig     = sigMustParse(remoteSigBytes)
+	remoteWireSig = mustWireSig(&remoteSig)
+
+	localTxid  = newChainHash(bytes.Repeat([]byte{0x01}, 32))
+	remoteTxid = newChainHash(bytes.Repeat([]byte{0x02}, 32))
+
+	closeTx = wire.NewMsgTx(2)
+)
+
+func newChainHash(b []byte) chainhash.Hash {
+	var h chainhash.Hash
+	copy(h[:], b)
+	return h
+}
+
+func sigMustParse(sigBytes []byte) ecdsa.Signature {
+	sig, err := ecdsa.ParseSignature(sigBytes)
+	if err != nil {
+		panic(err)
+	}
+
+	return *sig
+}
+
+func mustWireSig(e input.Signature) lnwire.Sig {
+	wireSig, err := lnwire.NewSigFromSignature(e)
+	if err != nil {
+		panic(err)
+	}
+
+	return wireSig
+}
+
+func fromHex(s string) []byte {
+	r, err := hex.DecodeString(s)
+	if err != nil {
+		panic("invalid hex in source file: " + s)
+	}
+
+	return r
+}
+
+func randOutPoint(t *testing.T) wire.OutPoint {
+	var op wire.OutPoint
+	if _, err := rand.Read(op.Hash[:]); err != nil {
+		t.Fatalf("unable to generate random outpoint: %v", err)
+	}
+	op.Index = rand.Uint32()
+
+	return op
+}
+
+func randPubKey(t *testing.T) *btcec.PublicKey {
+	priv, err := btcec.NewPrivateKey()
+	if err != nil {
+		t.Fatalf("unable to generate private key: %v", err)
+	}
+
+	return priv.PubKey()
+}
+
+func assertStateTransitions[Event any, Env protofsm.Environment](
+	t *testing.T, stateSub protofsm.StateSubscriber[Event, Env],
+	expectedStates []protofsm.State[Event, Env]) {
+
+	t.Helper()
+
+	for _, expectedState := range expectedStates {
+		newState := <-stateSub.NewItemCreated.ChanOut()
+
+		require.IsType(t, expectedState, newState)
+	}
+
+	// We should have no more states.
+	select {
+	case newState := <-stateSub.NewItemCreated.ChanOut():
+		t.Fatalf("unexpected state transition: %v", newState)
+	default:
+	}
+}
+
+// unknownEvent is a dummy event that is used to test that the state machine
+// transitions properly fail when an unknown event is received.
+type unknownEvent struct {
+}
+
+func (u *unknownEvent) protocolSealed() {
+}
+
+// assertUnknownEventFail asserts that the state machine fails as expected
+// given an unknown event.
+func assertUnknownEventFail(t *testing.T, startingState ProtocolState) {
+	t.Helper()
+
+	// Any unknown event should result in a failure.
+	t.Run("unknown_event", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some(startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		closeHarness.expectFailure(ErrInvalidStateTransition)
+
+		closeHarness.chanCloser.SendEvent(&unknownEvent{})
+
+		// There should be no further state transitions.
+		closeHarness.assertNoStateTransitions()
+	})
+}
+
+type harnessCfg struct {
+	initialState fn.Option[ProtocolState]
+
+	thawHeight fn.Option[uint32]
+
+	localUpfrontAddr  fn.Option[lnwire.DeliveryAddress]
+	remoteUpfrontAddr fn.Option[lnwire.DeliveryAddress]
+}
+
+// rbfCloserTestHarness is a test harness for the RBF closer. It wires the
+// state machine up to a set of mocks (fee estimator, channel observer, close
+// signer, daemon adapters, and error reporter), and subscribes to state
+// transitions so tests can assert on each step of the close flow.
+type rbfCloserTestHarness struct {
+	*testing.T
+
+	cfg *harnessCfg
+
+	chanCloser *RbfChanCloser
+	env        Environment
+
+	newAddrErr error
+
+	pkScript []byte
+	peerPub  btcec.PublicKey
+
+	startingState RbfState
+
+	feeEstimator   *mockFeeEstimator
+	chanObserver   *mockChanObserver
+	signer         *mockCloseSigner
+	daemonAdapters *dummyAdapters
+	errReporter    *mockErrorReporter
+
+	stateSub protofsm.StateSubscriber[ProtocolEvent, *Environment]
+}
+
+var errfailAddr = fmt.Errorf("fail")
+
+// failNewAddrFunc causes the newAddrFunc to fail.
+func (r *rbfCloserTestHarness) failNewAddrFunc() {
+	r.T.Helper()
+
+	r.newAddrErr = errfailAddr
+}
+
+func (r *rbfCloserTestHarness) newAddrFunc() (
+	lnwire.DeliveryAddress, error) {

+	r.T.Helper()
+
+	return lnwire.DeliveryAddress{}, r.newAddrErr
+}
+
+func (r *rbfCloserTestHarness) assertExpectations() {
+	r.T.Helper()
+
+	r.feeEstimator.AssertExpectations(r.T)
+	r.chanObserver.AssertExpectations(r.T)
+	r.daemonAdapters.AssertExpectations(r.T)
+	r.errReporter.AssertExpectations(r.T)
+	r.signer.AssertExpectations(r.T)
+}
+
+func (r *rbfCloserTestHarness) stopAndAssert() {
+	r.T.Helper()
+
+	defer r.chanCloser.RemoveStateSub(r.stateSub)
+	r.chanCloser.Stop()
+
+	r.assertExpectations()
+}
+
+func (r *rbfCloserTestHarness) assertStartupAssertions() {
+	r.T.Helper()
+
+	// When the state machine has started up, we receive a starting state
+	// transition for the initial state.
+	expectedStates := []RbfState{r.startingState}
+	assertStateTransitions(r.T, r.stateSub, expectedStates)
+
+	// Registration for the spend notification should have been called.
+	r.daemonAdapters.AssertCalled(
+		r.T, "RegisterSpendNtfn", &r.env.ChanPoint, r.pkScript,
+		r.env.Scid.BlockHeight,
+	)
+}
+
+func (r *rbfCloserTestHarness) assertNoStateTransitions() {
+	select {
+	case newState := <-r.stateSub.NewItemCreated.ChanOut():
+		r.T.Fatalf("unexpected state transition: %T", newState)
+	case <-time.After(10 * time.Millisecond):
+	}
+}
+
+func (r *rbfCloserTestHarness) assertStateTransitions(states ...RbfState) {
+	assertStateTransitions(r.T, r.stateSub, states)
+}
+
+func (r *rbfCloserTestHarness) currentState() RbfState {
+	state, err := r.chanCloser.CurrentState()
+	require.NoError(r.T, err)
+
+	return state
+}
+
+type shutdownExpect struct {
+	isInitiator   bool
+	allowSend     bool
+	recvShutdown  bool
+	finalBalances fn.Option[ShutdownBalances]
+}
+
+func (r *rbfCloserTestHarness) expectShutdownEvents(expect shutdownExpect) {
+	r.T.Helper()
+
+	r.chanObserver.On("FinalBalances").Return(expect.finalBalances)
+
+	if expect.isInitiator {
+		r.chanObserver.On("NoDanglingUpdates").Return(expect.allowSend)
+	} else {
+		r.chanObserver.On("NoDanglingUpdates").Return(
+			expect.allowSend,
+		).Maybe()
+	}
+
+	// When we're receiving a shutdown, we should also disable incoming
+	// adds.
+	if expect.recvShutdown {
+		r.chanObserver.On("DisableIncomingAdds").Return(nil)
+	}
+
+	// If a close is already in progress, then this is an RBF iteration
+	// and the link has already been cleaned up, so we don't expect any
+	// further assertions, other than the check on the closing state
+	// below.
+ if expect.finalBalances.IsSome() { + return + } + + r.chanObserver.On("DisableOutgoingAdds").Return(nil) + + r.chanObserver.On( + "MarkShutdownSent", mock.Anything, expect.isInitiator, + ).Return(nil) + + r.chanObserver.On("DisableChannel").Return(nil) +} + +func (r *rbfCloserTestHarness) expectFinalBalances( + b fn.Option[ShutdownBalances]) { + + r.chanObserver.On("FinalBalances").Return(b) +} + +func (r *rbfCloserTestHarness) expectIncomingAddsDisabled() { + r.T.Helper() + + r.chanObserver.On("DisableIncomingAdds").Return(nil) +} + +type msgMatcher func([]lnwire.Message) bool + +func singleMsgMatcher[M lnwire.Message](f func(M) bool) msgMatcher { + return func(msgs []lnwire.Message) bool { + if len(msgs) != 1 { + return false + } + + wireMsg := msgs[0] + + msg, ok := wireMsg.(M) + if !ok { + return false + } + + if f == nil { + return true + } + + return f(msg) + } +} + +func (r *rbfCloserTestHarness) expectMsgSent(matcher msgMatcher) { + r.T.Helper() + + if matcher == nil { + r.daemonAdapters.On( + "SendMessages", r.peerPub, mock.Anything, + ).Return(nil) + } else { + r.daemonAdapters.On( + "SendMessages", r.peerPub, mock.MatchedBy(matcher), + ).Return(nil) + } +} + +func (r *rbfCloserTestHarness) expectFeeEstimate(absoluteFee btcutil.Amount, + numTimes int) { + + r.T.Helper() + + // TODO(roasbeef): mo assertions for dust case + + r.feeEstimator.On( + "EstimateFee", mock.Anything, mock.Anything, mock.Anything, + mock.Anything, + ).Return(absoluteFee, nil).Times(numTimes) +} + +func (r *rbfCloserTestHarness) expectFailure(err error) { + r.T.Helper() + + errorMatcher := func(e error) bool { + return errors.Is(e, err) + } + + r.errReporter.On("ReportError", mock.MatchedBy(errorMatcher)).Return() +} + +func (r *rbfCloserTestHarness) expectNewCloseSig( + localScript, remoteScript []byte, fee btcutil.Amount, + closeBalance btcutil.Amount) { + + r.T.Helper() + + r.signer.On( + "CreateCloseProposal", fee, localScript, remoteScript, + mock.Anything, + ).Return(&localSig, &localTxid, closeBalance, nil) +} + +func (r *rbfCloserTestHarness) waitForMsgSent() { + r.T.Helper() + + err := wait.Predicate(func() bool { + return r.daemonAdapters.msgSent.Load() + }, time.Second*3) + require.NoError(r.T, err) +} + +func (r *rbfCloserTestHarness) expectRemoteCloseFinalized( + localCoopSig, remoteCoopSig input.Signature, localScript, + remoteScript []byte, fee btcutil.Amount, + balanceAfterClose btcutil.Amount, isLocal bool) { + + r.expectNewCloseSig( + localScript, remoteScript, fee, balanceAfterClose, + ) + + r.expectCloseFinalized( + localCoopSig, remoteCoopSig, localScript, remoteScript, + fee, balanceAfterClose, isLocal, + ) + + r.expectMsgSent(singleMsgMatcher[*lnwire.ClosingSig](nil)) +} + +func (r *rbfCloserTestHarness) expectCloseFinalized( + localCoopSig, remoteCoopSig input.Signature, localScript, + remoteScript []byte, fee btcutil.Amount, + balanceAfterClose btcutil.Amount, isLocal bool) { + + // The caller should obtain the final signature. + r.signer.On("CompleteCooperativeClose", + localCoopSig, remoteCoopSig, localScript, + remoteScript, fee, mock.Anything, + ).Return(closeTx, balanceAfterClose, nil) + + // The caller should also mark the transaction as broadcast on disk. + r.chanObserver.On("MarkCoopBroadcasted", closeTx, isLocal).Return(nil) + + // Finally, we expect that the daemon executor should broadcast the + // above transaction. 
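+	// The label is matched loosely (mock.Anything) here, as it embeds the
+	// channel's SCID via labels.MakeLabel.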
+	r.daemonAdapters.On(
+		"BroadcastTransaction", closeTx, mock.Anything,
+	).Return(nil)
+}
+
+func (r *rbfCloserTestHarness) expectChanPendingClose() {
+	var nilTx *wire.MsgTx
+	r.chanObserver.On("MarkCoopBroadcasted", nilTx, true).Return(nil)
+}
+
+func (r *rbfCloserTestHarness) assertLocalClosePending() {
+	// We should then remain in the outer close negotiation state.
+	r.assertStateTransitions(&ClosingNegotiation{})
+
+	// If we examine the final resting state, we should see that we're
+	// still in the negotiation state for our local peer state.
+	currentState := assertStateT[*ClosingNegotiation](r)
+
+	// From this, we'll assert the resulting peer state, and that the co-op
+	// close txn is known.
+	closePendingState, ok := currentState.PeerState.GetForParty(
+		lntypes.Local,
+	).(*ClosePending)
+	require.True(r.T, ok)
+
+	require.Equal(r.T, closeTx, closePendingState.CloseTx)
+}
+
+type dustExpectation uint
+
+const (
+	noDustExpect dustExpectation = iota
+
+	localDustExpect
+
+	remoteDustExpect
+)
+
+func (d dustExpectation) String() string {
+	switch d {
+	case noDustExpect:
+		return "no dust"
+	case localDustExpect:
+		return "local dust"
+	case remoteDustExpect:
+		return "remote dust"
+	default:
+		return "unknown"
+	}
+}
+
+// expectHalfSignerIteration asserts that we carry out 1/2 of the locally
+// initiated signer iteration. This constitutes sending a ClosingComplete
+// message to the remote party, and all the other intermediate steps.
+func (r *rbfCloserTestHarness) expectHalfSignerIteration(
+	initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount,
+	dustExpect dustExpectation) {
+
+	numFeeCalls := 2
+
+	// If we're using the SendOfferEvent as a trigger, we only need to call
+	// the fee estimation once.
+	if _, ok := initEvent.(*SendOfferEvent); ok {
+		numFeeCalls = 1
+	}
+
+	// We'll now expect that fee estimation is called, and then
+	// send in the flushed event. We expect up to two calls: one to
+	// figure out if we can pay the fee, and then another when we
+	// actually pay the fee.
+	r.expectFeeEstimate(absoluteFee, numFeeCalls)
+
+	// Next, we'll assert that we receive calls to generate a new
+	// closing signature, and then send out the ClosingComplete
+	// message to the remote peer.
+	r.expectNewCloseSig(
+		localAddr, remoteAddr, absoluteFee, balanceAfterClose,
+	)
+
+	// We expect the proper sig field to be set, based on the dust
+	// expectation passed in.
+	msgExpect := singleMsgMatcher(func(m *lnwire.ClosingComplete) bool {
+		r.T.Helper()
+
+		switch {
+		case m.CloserNoClosee.IsSome():
+			r.T.Logf("closer no closee field set, expected: %v",
+				dustExpect)
+
+			return dustExpect == remoteDustExpect
+		case m.NoCloserClosee.IsSome():
+			r.T.Logf("no closer closee field set, expected: %v",
+				dustExpect)
+
+			return dustExpect == localDustExpect
+		default:
+			r.T.Logf("no dust field set, expected: %v", dustExpect)
+
+			return (m.CloserAndClosee.IsSome() &&
+				dustExpect == noDustExpect)
+		}
+	})
+	r.expectMsgSent(msgExpect)
+
+	r.chanCloser.SendEvent(initEvent)
+
+	// Based on the init event, we'll either just go to the closing
+	// negotiation state, or go through the channel flushing state first.
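+	//
+	// As a quick sketch of the expected paths: a ShutdownReceived event
+	// walks through ChannelFlushing first, a SendOfferEvent starts
+	// directly in negotiation, and a ChannelFlushed event triggers an
+	// extra inner negotiation transition.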
+	var expectedStates []RbfState
+	switch initEvent.(type) {
+	case *ShutdownReceived:
+		expectedStates = []RbfState{
+			&ChannelFlushing{}, &ClosingNegotiation{},
+		}
+
+	case *SendOfferEvent:
+		expectedStates = []RbfState{&ClosingNegotiation{}}
+
+	case *ChannelFlushed:
+		// If we're sending a flush event here, then this means that we
+		// also have enough balance to cover the fee so we'll have
+		// another inner transition to the negotiation state.
+		expectedStates = []RbfState{
+			&ClosingNegotiation{}, &ClosingNegotiation{},
+		}
+
+	default:
+		r.T.Fatalf("unknown event type: %T", initEvent)
+	}
+
+	// We should now transition through the set of states we computed
+	// above.
+	//
+	// TODO(roasbeef): take in expected set of transitions!!!
+	//  * or base off of event, if shutdown recv'd know we're doing a full
+	//  loop
+	r.assertStateTransitions(expectedStates...)
+
+	// If we examine the final resting state, we should see that we're
+	// now in the LocalOfferSent state for our local peer state.
+	currentState := assertStateT[*ClosingNegotiation](r)
+
+	// From this, we'll assert the resulting peer state.
+	offerSentState, ok := currentState.PeerState.GetForParty(
+		lntypes.Local,
+	).(*LocalOfferSent)
+	require.True(r.T, ok)
+
+	// The proposed fee, as well as our local signature, should be
+	// properly stashed in the state.
+	require.Equal(r.T, absoluteFee, offerSentState.ProposedFee)
+	require.Equal(r.T, localSigWire, offerSentState.LocalSig)
+}
+
+func (r *rbfCloserTestHarness) assertSingleRbfIteration(
+	initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount,
+	dustExpect dustExpectation) {
+
+	// We'll now send in the send offer event, which should trigger 1/2 of
+	// the RBF loop, ending us in the LocalOfferSent state.
+	r.expectHalfSignerIteration(
+		initEvent, balanceAfterClose, absoluteFee, noDustExpect,
+	)
+
+	// Now that we're in the local offer sent state, we'll send the
+	// response of the remote party, which completes one iteration.
+	localSigEvent := &LocalSigReceived{
+		SigMsg: lnwire.ClosingSig{
+			ClosingSigs: lnwire.ClosingSigs{
+				CloserAndClosee: newSigTlv[tlv.TlvType3](
+					remoteWireSig,
+				),
+			},
+		},
+	}
+
+	// Before we send the event, we expect the final signature to be
+	// combined/obtained, and for the close to be finalized on disk.
+	r.expectCloseFinalized(
+		&localSig, &remoteSig, localAddr, remoteAddr, absoluteFee,
+		balanceAfterClose, true,
+	)
+
+	r.chanCloser.SendEvent(localSigEvent)
+
+	// We should transition to the pending closing state now.
+	r.assertLocalClosePending()
+}
+
+func (r *rbfCloserTestHarness) assertSingleRemoteRbfIteration(
+	initEvent ProtocolEvent, balanceAfterClose, absoluteFee btcutil.Amount,
+	sequence uint32, iteration bool) {
+
+	// If this is an iteration, then we expect some intermediate states,
+	// before we enter the main RBF/sign loop.
+	if iteration {
+		r.expectFeeEstimate(absoluteFee, 1)
+
+		r.assertStateTransitions(
+			&ChannelActive{}, &ShutdownPending{},
+			&ChannelFlushing{}, &ClosingNegotiation{},
+		)
+	}
+
+	// When we receive the signature below, our local state machine should
+	// move to finalize the close.
+	r.expectRemoteCloseFinalized(
+		&localSig, &remoteSig, localAddr, remoteAddr,
+		absoluteFee, balanceAfterClose, false,
+	)
+
+	r.chanCloser.SendEvent(initEvent)
+
+	// Our outer state should transition to the ClosingNegotiation state.
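+	//
+	// Unlike the local flow, the remote iteration completes on a single
+	// message: upon their offer we counter-sign, broadcast, and send our
+	// ClosingSig in one step, so only a single outer transition is
+	// expected here.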
+	r.assertStateTransitions(&ClosingNegotiation{})
+
+	// If we examine the final resting state, we should see that we're
+	// now in the ClosePending state for the remote peer.
+	currentState := assertStateT[*ClosingNegotiation](r)
+
+	// From this, we'll assert the resulting peer state.
+	pendingState, ok := currentState.PeerState.GetForParty(
+		lntypes.Remote,
+	).(*ClosePending)
+	require.True(r.T, ok)
+
+	// The co-op close transaction should be properly stashed in the
+	// state.
+	require.Equal(r.T, closeTx, pendingState.CloseTx)
+}
+
+func assertStateT[T ProtocolState](h *rbfCloserTestHarness) T {
+	h.T.Helper()
+
+	currentState, ok := h.currentState().(T)
+	require.True(h.T, ok)
+
+	return currentState
+}
+
+// newRbfCloserTestHarness creates a new test harness for the RBF closer.
+func newRbfCloserTestHarness(t *testing.T,
+	cfg *harnessCfg) *rbfCloserTestHarness {
+
+	startingHeight := 200
+
+	chanPoint := randOutPoint(t)
+	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
+
+	scid := lnwire.NewShortChanIDFromInt(rand.Uint64())
+
+	peerPub := randPubKey(t)
+
+	msgMapper := NewRbfMsgMapper(uint32(startingHeight), chanID)
+
+	initialState := cfg.initialState.UnwrapOr(&ChannelActive{})
+
+	defaultFeeRate := chainfee.FeePerKwFloor
+
+	feeEstimator := &mockFeeEstimator{}
+	mockObserver := &mockChanObserver{}
+	errReporter := &mockErrorReporter{}
+	mockSigner := &mockCloseSigner{}
+
+	harness := &rbfCloserTestHarness{
+		T:             t,
+		cfg:           cfg,
+		startingState: initialState,
+		feeEstimator:  feeEstimator,
+		signer:        mockSigner,
+		chanObserver:  mockObserver,
+		errReporter:   errReporter,
+		peerPub:       *peerPub,
+	}
+
+	env := Environment{
+		ChainParams:           chaincfg.RegressionNetParams,
+		ChanPeer:              *peerPub,
+		ChanPoint:             chanPoint,
+		ChanID:                chanID,
+		Scid:                  scid,
+		DefaultFeeRate:        defaultFeeRate.FeePerVByte(),
+		ThawHeight:            cfg.thawHeight,
+		RemoteUpfrontShutdown: cfg.remoteUpfrontAddr,
+		LocalUpfrontShutdown:  cfg.localUpfrontAddr,
+		NewDeliveryScript:     harness.newAddrFunc,
+		FeeEstimator:          feeEstimator,
+		ChanObserver:          mockObserver,
+		CloseSigner:           mockSigner,
+	}
+	harness.env = env
+
+	var pkScript []byte
+	harness.pkScript = pkScript
+
+	spendEvent := protofsm.RegisterSpend[ProtocolEvent]{
+		OutPoint:       chanPoint,
+		HeightHint:     scid.BlockHeight,
+		PkScript:       pkScript,
+		PostSpendEvent: fn.Some[RbfSpendMapper](SpendMapper),
+	}
+
+	daemonAdapters := newDaemonAdapters()
+	harness.daemonAdapters = daemonAdapters
+
+	protoCfg := RbfChanCloserCfg{
+		ErrorReporter: errReporter,
+		Daemon:        daemonAdapters,
+		InitialState:  initialState,
+		Env:           &env,
+		InitEvent:     fn.Some[protofsm.DaemonEvent](&spendEvent),
+		MsgMapper: fn.Some[protofsm.MsgMapper[ProtocolEvent]](
+			msgMapper,
+		),
+		CustomPollInterval: fn.Some(time.Nanosecond),
+	}
+
+	// Before we start, we always expect an initial spend event.
+	daemonAdapters.On(
+		"RegisterSpendNtfn", &chanPoint, pkScript, scid.BlockHeight,
+	).Return(nil)
+
+	chanCloser := protofsm.NewStateMachine(protoCfg)
+	chanCloser.Start()
+
+	harness.stateSub = chanCloser.RegisterStateEvents()
+
+	harness.chanCloser = &chanCloser
+
+	return harness
+}
+
+func newCloser(t *testing.T, cfg *harnessCfg) *rbfCloserTestHarness {
+	chanCloser := newRbfCloserTestHarness(t, cfg)
+
+	// We should start in the active state, and have our spend req
+	// daemon event handled.
+	chanCloser.assertStartupAssertions()
+
+	return chanCloser
+}
+
+// TestRbfChannelActiveTransitions tests the transitions from the
+// ChannelActive state.
+func TestRbfChannelActiveTransitions(t *testing.T) {
+	localAddr := lnwire.DeliveryAddress(bytes.Repeat([]byte{0x01}, 20))
+	remoteAddr := lnwire.DeliveryAddress(bytes.Repeat([]byte{0x02}, 20))
+
+	feeRate := chainfee.SatPerVByte(1000)
+
+	// Test that if a spend event is received, the FSM transitions to the
+	// CloseFin terminal state.
+	t.Run("spend_event", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		closeHarness.chanCloser.SendEvent(&SpendEvent{})
+
+		closeHarness.assertStateTransitions(&CloseFin{})
+	})
+
+	// If we send in a local shutdown event, but fail to get an addr, the
+	// state machine should terminate.
+	t.Run("local_initiated_close_addr_fail", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{})
+		defer closeHarness.stopAndAssert()
+
+		closeHarness.failNewAddrFunc()
+		closeHarness.expectFailure(errfailAddr)
+
+		// We don't specify an upfront shutdown addr, and don't specify
+		// one here in the event, so we should call the new addr
+		// function, but then fail.
+		closeHarness.chanCloser.SendEvent(&SendShutdown{})
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// Initiating the shutdown should have us transition to the shutdown
+	// pending state. We should also emit events to disable the channel,
+	// and also send a message to our target peer.
+	t.Run("local_initiated_close_ok", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// Once we send the event below, we should get calls to the
+		// chan observer, the msg sender, and the link control.
+		closeHarness.expectShutdownEvents(shutdownExpect{
+			isInitiator: true,
+			allowSend:   true,
+		})
+		closeHarness.expectMsgSent(nil)
+
+		// If we send the shutdown event, we should transition to the
+		// shutdown pending state.
+		closeHarness.chanCloser.SendEvent(&SendShutdown{
+			IdealFeeRate: feeRate,
+		})
+		closeHarness.assertStateTransitions(&ShutdownPending{})
+
+		// If we examine the internal state, it should be consistent
+		// with the fee+addr we sent in.
+		currentState := assertStateT[*ShutdownPending](closeHarness)
+
+		require.Equal(
+			t, feeRate, currentState.IdealFeeRate.UnsafeFromSome(),
+		)
+		require.Equal(
+			t, localAddr,
+			currentState.ShutdownScripts.LocalDeliveryScript,
+		)
+
+		// Wait till the msg has been sent to assert our expectations.
+		//
+		// TODO(roasbeef): can use call.WaitFor here?
+		closeHarness.waitForMsgSent()
+	})
+
+	// TODO(roasbeef): thaw height fail
+
+	// When we receive a shutdown, we should transition to the shutdown
+	// pending state, with the local+remote shutdown addrs known.
+	t.Run("remote_initiated_close_ok", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We assert our shutdown events, and also that we eventually
+		// send a shutdown to the remote party. We'll hold back the
+		// send in this case though, as we should only send once no
+		// updates are dangling.
+		closeHarness.expectShutdownEvents(shutdownExpect{
+			isInitiator:  false,
+			allowSend:    false,
+			recvShutdown: true,
+		})
+
+		// Next, we'll emit the recv event, with the addr of the remote
+		// party.
+		closeHarness.chanCloser.SendEvent(&ShutdownReceived{
+			ShutdownScript: remoteAddr,
+		})
+
+		// We should transition to the shutdown pending state.
+		closeHarness.assertStateTransitions(&ShutdownPending{})
+
+		currentState := assertStateT[*ShutdownPending](closeHarness)
+
+		// Both the local and remote shutdown scripts should be set.
+		require.Equal(
+			t, localAddr,
+			currentState.ShutdownScripts.LocalDeliveryScript,
+		)
+		require.Equal(
+			t, remoteAddr,
+			currentState.ShutdownScripts.RemoteDeliveryScript,
+		)
+	})
+
+	// Any unknown event should result in a failure.
+	assertUnknownEventFail(t, &ChannelActive{})
+}
+
+// TestRbfShutdownPendingTransitions tests the transitions of the RBF closer
+// once we get to the shutdown pending state. In this state, we wait for either
+// a shutdown to be received, or a notification that we're able to send a
+// shutdown ourselves.
+func TestRbfShutdownPendingTransitions(t *testing.T) {
+	t.Parallel()
+
+	startingState := &ShutdownPending{}
+
+	// Test that if a spend event is received, the FSM transitions to the
+	// CloseFin terminal state.
+	t.Run("spend_event", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](
+				startingState,
+			),
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		closeHarness.chanCloser.SendEvent(&SpendEvent{})
+
+		closeHarness.assertStateTransitions(&CloseFin{})
+	})
+
+	// If the remote party sends us a different shutdown addr than we
+	// expected, then we'll fail.
+	t.Run("initiator_shutdown_recv_validate_fail", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](
+				startingState,
+			),
+			remoteUpfrontAddr: fn.Some(remoteAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should fail as the shutdown script isn't what we
+		// expected.
+		closeHarness.expectFailure(ErrUpfrontShutdownScriptMismatch)
+
+		// We'll now send in a ShutdownReceived event, but with a
+		// different address provided in the shutdown message. This
+		// should result in an error.
+		closeHarness.chanCloser.SendEvent(&ShutdownReceived{
+			ShutdownScript: localAddr,
+		})
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// Otherwise, if the shutdown is well composed, then we should
+	// transition to the ChannelFlushing state.
+	t.Run("initiator_shutdown_recv_ok", func(t *testing.T) {
+		firstState := *startingState
+		firstState.IdealFeeRate = fn.Some(
+			chainfee.FeePerKwFloor.FeePerVByte(),
+		)
+		firstState.ShutdownScripts = ShutdownScripts{
+			LocalDeliveryScript:  localAddr,
+			RemoteDeliveryScript: remoteAddr,
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](
+				&firstState,
+			),
+			localUpfrontAddr:  fn.Some(localAddr),
+			remoteUpfrontAddr: fn.Some(remoteAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should disable incoming adds for the channel at this
+		// point as well.
+		closeHarness.expectFinalBalances(fn.None[ShutdownBalances]())
+		closeHarness.expectIncomingAddsDisabled()
+
+		// We'll send in a shutdown received event, with the expected
+		// co-op close addr.
+		closeHarness.chanCloser.SendEvent(&ShutdownReceived{
+			ShutdownScript: remoteAddr,
+		})
+
+		// We should transition to the channel flushing state.
+		closeHarness.assertStateTransitions(&ChannelFlushing{})
+
+		// Now we'll ensure that the flushing state has the proper
+		// co-op close state.
+		currentState := assertStateT[*ChannelFlushing](closeHarness)
+
+		require.Equal(t, localAddr, currentState.LocalDeliveryScript)
+		require.Equal(t, remoteAddr, currentState.RemoteDeliveryScript)
+		require.Equal(
+			t, firstState.IdealFeeRate, currentState.IdealFeeRate,
+		)
+	})
+
+	// If we already received the shutdown, then we'll rely on the external
+	// ShutdownComplete signal that tells us we were able to send our own
+	// shutdown message once no updates were dangling.
+	t.Run("responder_complete", func(t *testing.T) {
+		firstState := *startingState
+		firstState.IdealFeeRate = fn.Some(
+			chainfee.FeePerKwFloor.FeePerVByte(),
+		)
+		firstState.ShutdownScripts = ShutdownScripts{
+			LocalDeliveryScript:  localAddr,
+			RemoteDeliveryScript: remoteAddr,
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](
+				&firstState,
+			),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// In this case we're doing the shutdown dance for the first
+		// time, so we'll mark the channel as not being flushed.
+		closeHarness.expectFinalBalances(fn.None[ShutdownBalances]())
+
+		// We'll send in a shutdown complete event.
+		closeHarness.chanCloser.SendEvent(&ShutdownComplete{})
+
+		// We should transition to the channel flushing state.
+		closeHarness.assertStateTransitions(&ChannelFlushing{})
+	})
+
+	// Any unknown event should result in a failure.
+	assertUnknownEventFail(t, startingState)
+}
+
+// TestRbfChannelFlushingTransitions tests the transitions of the RBF closer
+// for the channel flushing state. Once the coast is clear, we should
+// transition to the negotiation state.
+func TestRbfChannelFlushingTransitions(t *testing.T) {
+	t.Parallel()
+
+	localBalance := lnwire.NewMSatFromSatoshis(10_000)
+	remoteBalance := lnwire.NewMSatFromSatoshis(50_000)
+
+	absoluteFee := btcutil.Amount(10_100)
+
+	startingState := &ChannelFlushing{
+		ShutdownScripts: ShutdownScripts{
+			LocalDeliveryScript:  localAddr,
+			RemoteDeliveryScript: remoteAddr,
+		},
+	}
+
+	flushTemplate := &ChannelFlushed{
+		ShutdownBalances: ShutdownBalances{
+			LocalBalance:  localBalance,
+			RemoteBalance: remoteBalance,
+		},
+	}
+
+	// If we send in the channel flushed event, but the local party can't
+	// pay for fees, then we should just head to the negotiation state.
+	for _, isFreshFlush := range []bool{true, false} {
+		chanFlushedEvent := *flushTemplate
+		chanFlushedEvent.FreshFlush = isFreshFlush
+
+		testName := fmt.Sprintf("local_cannot_pay_for_fee/"+
+			"fresh_flush=%v", isFreshFlush)
+
+		t.Run(testName, func(t *testing.T) {
+			firstState := *startingState
+
+			closeHarness := newCloser(t, &harnessCfg{
+				initialState: fn.Some[ProtocolState](
+					&firstState,
+				),
+			})
+			defer closeHarness.stopAndAssert()
+
+			// As part of the set up for this state, we'll have the
+			// final absolute fee required be greater than the
+			// balance of the local party.
+			closeHarness.expectFeeEstimate(absoluteFee, 1)
+
+			// If this is a fresh flush, then we expect the state
+			// to be marked on disk.
+			if isFreshFlush {
+				closeHarness.expectChanPendingClose()
+			}
+
+			// We'll now send in the event which should trigger
+			// this code path.
+			closeHarness.chanCloser.SendEvent(&chanFlushedEvent)
+
+			// With the event sent, we should now transition
+			// straight to the ClosingNegotiation state, with no
+			// further state transitions.
+			closeHarness.assertStateTransitions(
+				&ClosingNegotiation{},
+			)
+		})
+	}
+
+	for _, isFreshFlush := range []bool{true, false} {
+		flushEvent := *flushTemplate
+		flushEvent.FreshFlush = isFreshFlush
+
+		// We'll modify the starting balance to be 3x the required fee
+		// to ensure that we can pay for the fee.
+		flushEvent.ShutdownBalances.LocalBalance = lnwire.NewMSatFromSatoshis( //nolint:ll
+			absoluteFee * 3,
+		)
+
+		testName := fmt.Sprintf("local_can_pay_for_fee/"+
+			"fresh_flush=%v", isFreshFlush)
+
+		// In this scenario, we'll have the local party be able to pay
+		// for the fees, which will trigger additional state
+		// transitions.
+		t.Run(testName, func(t *testing.T) {
+			firstState := *startingState
+
+			closeHarness := newCloser(t, &harnessCfg{
+				initialState: fn.Some[ProtocolState](
+					&firstState,
+				),
+			})
+			defer closeHarness.stopAndAssert()
+
+			localBalance := flushEvent.ShutdownBalances.LocalBalance
+			balanceAfterClose := localBalance.ToSatoshis() - absoluteFee //nolint:ll
+
+			// If this is a fresh flush, then we expect the state
+			// to be marked on disk.
+			if isFreshFlush {
+				closeHarness.expectChanPendingClose()
+			}
+
+			// From here, we expect the state transition to go to
+			// closing negotiation, for a ClosingComplete message
+			// to be sent, and then for us to terminate at that
+			// state. This is 1/2 of the normal RBF signer flow.
+			closeHarness.expectHalfSignerIteration(
+				&flushEvent, balanceAfterClose, absoluteFee,
+				noDustExpect,
+			)
+		})
+	}
+
+	// Any unknown event should result in a failure.
+	assertUnknownEventFail(t, startingState)
+}
+
+// TestRbfCloseClosingNegotiationLocal tests the local portion of the primary
+// RBF close loop. We should be able to transition to a close state, get a sig,
+// then restart all over again to re-request a signature at a new higher fee
+// rate.
+func TestRbfCloseClosingNegotiationLocal(t *testing.T) {
+	t.Parallel()
+
+	localBalance := lnwire.NewMSatFromSatoshis(40_000)
+	remoteBalance := lnwire.NewMSatFromSatoshis(50_000)
+
+	absoluteFee := btcutil.Amount(10_100)
+
+	closeTerms := &CloseChannelTerms{
+		ShutdownBalances: ShutdownBalances{
+			LocalBalance:  localBalance,
+			RemoteBalance: remoteBalance,
+		},
+		ShutdownScripts: ShutdownScripts{
+			LocalDeliveryScript:  localAddr,
+			RemoteDeliveryScript: remoteAddr,
+		},
+	}
+	startingState := &ClosingNegotiation{
+		PeerState: lntypes.Dual[AsymmetricPeerState]{
+			Local: &LocalCloseStart{
+				CloseChannelTerms: *closeTerms,
+			},
+		},
+	}
+
+	sendOfferEvent := &SendOfferEvent{
+		TargetFeeRate: chainfee.FeePerKwFloor.FeePerVByte(),
+	}
+
+	balanceAfterClose := localBalance.ToSatoshis() - absoluteFee
+
+	// In this state, we'll simulate deciding that we need to send a new
+	// offer to the remote party.
+	t.Run("send_offer_iteration_no_dust", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We'll now send in the initial sender offer event, which
+		// should then trigger a single RBF iteration, ending at the
+		// pending state.
+		closeHarness.assertSingleRbfIteration(
+			sendOfferEvent, balanceAfterClose, absoluteFee,
+			noDustExpect,
+		)
+	})
+
+	// We'll run a similar test as above, but verify that if more than one
+	// sig field is set, we error out.
+	t.Run("send_offer_too_many_sigs_received", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We'll kick off the test as normal, triggering a send offer
+		// event to advance the state machine.
+		closeHarness.expectHalfSignerIteration(
+			sendOfferEvent, balanceAfterClose, absoluteFee,
+			noDustExpect,
+		)
+
+		// We'll now craft the local sig received event, but this time
+		// we'll specify 2 signature fields.
+		localSigEvent := &LocalSigReceived{
+			SigMsg: lnwire.ClosingSig{
+				ClosingSigs: lnwire.ClosingSigs{
+					CloserNoClosee: newSigTlv[tlv.TlvType1](
+						remoteWireSig,
+					),
+					CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
+						remoteWireSig,
+					),
+				},
+			},
+		}
+
+		// We expect that the state machine fails as we received more
+		// than one signature.
+		closeHarness.expectFailure(ErrTooManySigs)
+
+		// We should fail as the remote party sent us more than one
+		// signature.
+		closeHarness.chanCloser.SendEvent(localSigEvent)
+	})
+
+	// Next, we'll verify that if the balance of the remote party is dust,
+	// then the proper sig field is set.
+	t.Run("send_offer_iteration_remote_dust", func(t *testing.T) {
+		// We'll modify the starting state to reduce the balance of the
+		// remote party to something that'll be dust.
+		newCloseTerms := *closeTerms
+		newCloseTerms.ShutdownBalances.RemoteBalance = 100
+		firstState := &ClosingNegotiation{
+			PeerState: lntypes.Dual[AsymmetricPeerState]{
+				Local: &LocalCloseStart{
+					CloseChannelTerms: newCloseTerms,
+				},
+			},
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](firstState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We'll kick off the half iteration as normal, but this time
+		// we expect that the remote party's output is dust, so the
+		// proper field is set.
+		closeHarness.expectHalfSignerIteration(
+			sendOfferEvent, balanceAfterClose, absoluteFee,
+			remoteDustExpect,
+		)
+	})
+
+	// Similarly, we'll verify that if our final closing balance is dust,
+	// then we send the sig that omits our output.
+	t.Run("send_offer_iteration_local_dust", func(t *testing.T) {
+		firstState := &ClosingNegotiation{
+			PeerState: lntypes.Dual[AsymmetricPeerState]{
+				Local: &LocalCloseStart{
+					CloseChannelTerms: *closeTerms,
+				},
+			},
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](firstState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We'll kick off the half iteration as normal, but this time
+		// we'll have our balance after close be dust, so we expect
+		// that the local output is dust in the sig we send.
+		dustBalance := btcutil.Amount(100)
+		closeHarness.expectHalfSignerIteration(
+			sendOfferEvent, dustBalance, absoluteFee,
+			localDustExpect,
+		)
+	})
+
+	// In this test, we'll assert that we're able to restart the RBF loop
+	// to trigger additional signature iterations.
+	t.Run("send_offer_rbf_iteration_loop", func(t *testing.T) {
+		firstState := &ClosingNegotiation{
+			PeerState: lntypes.Dual[AsymmetricPeerState]{
+				Local: &LocalCloseStart{
+					CloseChannelTerms: *closeTerms,
+				},
+			},
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState:     fn.Some[ProtocolState](firstState),
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We'll start out by first triggering a routine iteration,
+		// assuming we start in this negotiation state.
+		closeHarness.assertSingleRbfIteration(
+			sendOfferEvent, balanceAfterClose, absoluteFee,
+			noDustExpect,
+		)
+
+		// Next, we'll send in a new SendShutdown event which simulates
+		// the user requesting an RBF fee bump. We'll use 10x the fee
+		// we used in the last iteration.
+		rbfFeeBump := chainfee.FeePerKwFloor.FeePerVByte() * 10
+		sendShutdown := &SendShutdown{
+			IdealFeeRate: rbfFeeBump,
+		}
+
+		// We should send shutdown as normal, but skip some other
+		// checks as we know the close is in progress.
+		closeHarness.expectShutdownEvents(shutdownExpect{
+			allowSend:     true,
+			finalBalances: fn.Some(closeTerms.ShutdownBalances),
+			recvShutdown:  true,
+		})
+		closeHarness.expectMsgSent(
+			singleMsgMatcher[*lnwire.Shutdown](nil),
+		)
+
+		closeHarness.chanCloser.SendEvent(sendShutdown)
+
+		// We should first transition to the Channel Active state
+		// momentarily, before transitioning to the shutdown pending
+		// state.
+		closeHarness.assertStateTransitions(
+			&ChannelActive{}, &ShutdownPending{},
+		)
+
+		// Next, we'll send in the shutdown received event, which
+		// should transition us to the channel flushing state.
+		shutdownEvent := &ShutdownReceived{
+			ShutdownScript: remoteAddr,
+		}
+
+		// Now we expect that another full RBF iteration takes place
+		// (we initiate a new local sig).
+		closeHarness.assertSingleRbfIteration(
+			shutdownEvent, balanceAfterClose, absoluteFee,
+			noDustExpect,
+		)
+
+		// We should terminate in the negotiation state.
+		closeHarness.assertStateTransitions(
+			&ClosingNegotiation{},
+		)
+	})
+}
+
+// TestRbfCloseClosingNegotiationRemote tests that the state machine is able
+// to handle RBF iterations to sign for the closing transaction of the remote
+// party.
+func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
+	t.Parallel()
+
+	localBalance := lnwire.NewMSatFromSatoshis(40_000)
+	remoteBalance := lnwire.NewMSatFromSatoshis(50_000)
+
+	absoluteFee := btcutil.Amount(10_100)
+
+	closeTerms := &CloseChannelTerms{
+		ShutdownBalances: ShutdownBalances{
+			LocalBalance:  localBalance,
+			RemoteBalance: remoteBalance,
+		},
+		ShutdownScripts: ShutdownScripts{
+			LocalDeliveryScript:  localAddr,
+			RemoteDeliveryScript: remoteAddr,
+		},
+	}
+	startingState := &ClosingNegotiation{
+		PeerState: lntypes.Dual[AsymmetricPeerState]{
+			Local: &LocalCloseStart{
+				CloseChannelTerms: *closeTerms,
+			},
+			Remote: &RemoteCloseStart{
+				CloseChannelTerms: *closeTerms,
+			},
+		},
+	}
+
+	balanceAfterClose := remoteBalance.ToSatoshis() - absoluteFee
+
+	sequence := uint32(mempool.MaxRBFSequence)
+
+	// This case tests that if we receive a signature from the remote
+	// party where they can't pay for the fees, we exit.
+	t.Run("recv_offer_cannot_pay_for_fees", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should fail as they sent a sig, but can't pay for fees.
+		closeHarness.expectFailure(ErrRemoteCannotPay)
+
+		// We'll send in a new fee proposal, but the proposed fee will
+		// be higher than the remote party's balance.
+		feeOffer := &OfferReceivedEvent{
+			SigMsg: lnwire.ClosingComplete{
+				FeeSatoshis: absoluteFee * 10,
+			},
+		}
+		closeHarness.chanCloser.SendEvent(feeOffer)
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// If the remote party sends us a signature with a final sequence, then
+	// we'll error out as it can't be RBF'd.
+	t.Run("recv_offer_final_sequence", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should fail as they sent a final sequence.
+		closeHarness.expectFailure(ErrNonFinalSequence)
+
+		// We'll send an offer with something beyond the max RBF value;
+		// this should fail.
+		feeOffer := &OfferReceivedEvent{
+			SigMsg: lnwire.ClosingComplete{
+				FeeSatoshis: absoluteFee,
+				Sequence:    mempool.MaxRBFSequence + 1,
+			},
+		}
+		closeHarness.chanCloser.SendEvent(feeOffer)
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// If our balance is dust, then the remote party should send a
+	// signature that doesn't include our output.
+	t.Run("recv_offer_err_closer_no_closee", func(t *testing.T) {
+		// We'll modify our local balance to be dust.
+		closingTerms := *closeTerms
+		closingTerms.ShutdownBalances.LocalBalance = 100
+
+		firstState := &ClosingNegotiation{
+			PeerState: lntypes.Dual[AsymmetricPeerState]{
+				Local: &LocalCloseStart{
+					CloseChannelTerms: closingTerms,
+				},
+				Remote: &RemoteCloseStart{
+					CloseChannelTerms: closingTerms,
+				},
+			},
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](firstState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should fail as they included the wrong sig.
+		closeHarness.expectFailure(ErrCloserNoClosee)
+
+		// Our balance is dust, so we should reject this signature that
+		// includes our output.
+		feeOffer := &OfferReceivedEvent{
+			SigMsg: lnwire.ClosingComplete{
+				FeeSatoshis: absoluteFee,
+				ClosingSigs: lnwire.ClosingSigs{
+					CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
+						remoteWireSig,
+					),
+				},
+			},
+		}
+		closeHarness.chanCloser.SendEvent(feeOffer)
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// If no balances are dust, then they should send a sig covering both
+	// outputs.
+	t.Run("recv_offer_err_closer_and_closee", func(t *testing.T) {
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState: fn.Some[ProtocolState](startingState),
+		})
+		defer closeHarness.stopAndAssert()
+
+		// We should fail as they included the wrong sig.
+		closeHarness.expectFailure(ErrCloserAndClosee)
+
+		// Both balances are above dust, so we should reject this
+		// signature as it excludes an output.
+		feeOffer := &OfferReceivedEvent{
+			SigMsg: lnwire.ClosingComplete{
+				FeeSatoshis: absoluteFee,
+				ClosingSigs: lnwire.ClosingSigs{
+					CloserNoClosee: newSigTlv[tlv.TlvType1]( //nolint:ll
+						remoteWireSig,
+					),
+				},
+			},
+		}
+		closeHarness.chanCloser.SendEvent(feeOffer)
+
+		// We shouldn't have transitioned to a new state.
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// If everything lines up, then we should be able to do multiple RBF
+	// loops to enable the remote party to sign new versions of the co-op
+	// close transaction.
+	t.Run("recv_offer_rbf_loop_iterations", func(t *testing.T) {
+		// We'll modify our balance s.t. we're unable to pay for fees,
+		// but aren't yet dust.
+		closingTerms := *closeTerms
+		closingTerms.ShutdownBalances.LocalBalance = lnwire.NewMSatFromSatoshis( //nolint:ll
+			9000,
+		)
+
+		firstState := &ClosingNegotiation{
+			PeerState: lntypes.Dual[AsymmetricPeerState]{
+				Local: &LocalCloseStart{
+					CloseChannelTerms: closingTerms,
+				},
+				Remote: &RemoteCloseStart{
+					CloseChannelTerms: closingTerms,
+				},
+			},
+		}
+
+		closeHarness := newCloser(t, &harnessCfg{
+			initialState:     fn.Some[ProtocolState](firstState),
+			localUpfrontAddr: fn.Some(localAddr),
+		})
+		defer closeHarness.stopAndAssert()
+
+		feeOffer := &OfferReceivedEvent{
+			SigMsg: lnwire.ClosingComplete{
+				FeeSatoshis: absoluteFee,
+				Sequence:    sequence,
+				ClosingSigs: lnwire.ClosingSigs{
+					CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
+						remoteWireSig,
+					),
+				},
+			},
+		}
+
+		// As we're already in the negotiation phase, we'll now trigger
+		// a new iteration by having the remote party send a new offer
+		// sig.
+		closeHarness.assertSingleRemoteRbfIteration(
+			feeOffer, balanceAfterClose, absoluteFee, sequence,
+			false,
+		)
+
+		// At this point, we've completed a single RBF iteration, and
+		// want to test further iterations, so we'll use a shutdown
+		// event to kick it all off.
+		//
+		// Before we send the shutdown messages below, we'll mark the
+		// balances so that we fast track to the negotiation state.
+		closeHarness.expectShutdownEvents(shutdownExpect{
+			allowSend:     true,
+			finalBalances: fn.Some(closingTerms.ShutdownBalances),
+			recvShutdown:  true,
+		})
+		closeHarness.expectMsgSent(
+			singleMsgMatcher[*lnwire.Shutdown](nil),
+		)
+
+		// We'll now simulate the start of the RBF loop by receiving a
+		// new Shutdown message from the remote party. This signals
+		// that they want to obtain a new closing sig.
+		closeHarness.chanCloser.SendEvent(&ShutdownReceived{
+			ShutdownScript: remoteAddr,
+		})
+
+		// Next, we'll receive an offer from the remote party, and
+		// drive another RBF iteration. This time, we'll increase the
+		// absolute fee by 1k sats.
+		feeOffer.SigMsg.FeeSatoshis += 1000
+		absoluteFee = feeOffer.SigMsg.FeeSatoshis
+		closeHarness.assertSingleRemoteRbfIteration(
+			feeOffer, balanceAfterClose, absoluteFee, sequence,
+			true,
+		)
+
+		closeHarness.assertNoStateTransitions()
+	})
+
+	// TODO(roasbeef): cross sig case? tested isolation, so wolog?
+}

From 540d3c0fc77223ec051277ba92f2c6d902788208 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Tue, 24 Sep 2024 15:35:55 +0900
Subject: [PATCH 07/13] multi: switch to lock time from sequence for coop close v2

---
 lnwallet/chancloser/rbf_coop_test.go        | 27 +--------------------
 lnwallet/chancloser/rbf_coop_transitions.go | 15 ++++-------
 lnwire/closing_complete.go                  | 10 +++++-----
 lnwire/lnwire_test.go                       |  2 +-
 peer/brontide.go                            |  7 ++++++
 5 files changed, 19 insertions(+), 42 deletions(-)

diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go
index 1bfd574ba3..5cf985e8db 100644
--- a/lnwallet/chancloser/rbf_coop_test.go
+++ b/lnwallet/chancloser/rbf_coop_test.go
@@ -1430,31 +1430,6 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
 		closeHarness.assertNoStateTransitions()
 	})
 
-	// If the remote party sends us a signature with a final sequence, then
-	// we'll error out as it can't be RBF'd.
-	t.Run("recv_offer_final_sequence", func(t *testing.T) {
-		closeHarness := newCloser(t, &harnessCfg{
-			initialState: fn.Some[ProtocolState](startingState),
-		})
-		defer closeHarness.stopAndAssert()
-
-		// We should fail as they sent a final sequence.
-		closeHarness.expectFailure(ErrNonFinalSequence)
-
-		// We'll send an offer with something beyond the max RBF value;
-		// this should fail.
-		feeOffer := &OfferReceivedEvent{
-			SigMsg: lnwire.ClosingComplete{
-				FeeSatoshis: absoluteFee,
-				Sequence:    mempool.MaxRBFSequence + 1,
-			},
-		}
-		closeHarness.chanCloser.SendEvent(feeOffer)
-
-		// We shouldn't have transitioned to a new state.
-		closeHarness.assertNoStateTransitions()
-	})
-
 	// If our balance is dust, then the remote party should send a
 	// signature that doesn't include our output.
 	t.Run("recv_offer_err_closer_no_closee", func(t *testing.T) {
@@ -1559,7 +1534,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
 		feeOffer := &OfferReceivedEvent{
 			SigMsg: lnwire.ClosingComplete{
 				FeeSatoshis: absoluteFee,
-				Sequence:    sequence,
+				LockTime:    10,
 				ClosingSigs: lnwire.ClosingSigs{
 					CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
 						remoteWireSig,
diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go
index b3a038f38a..de7a09dd9c 100644
--- a/lnwallet/chancloser/rbf_coop_transitions.go
+++ b/lnwallet/chancloser/rbf_coop_transitions.go
@@ -716,7 +716,9 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
 		Msgs: []lnwire.Message{&lnwire.ClosingComplete{
 			ChannelID:   env.ChanID,
 			FeeSatoshis: absoluteFee,
-			Sequence:    mempool.MaxRBFSequence,
+			// TODO(roasbeef): thread thru proper height
+			// value
+			LockTime:    mempool.MaxRBFSequence,
 			ClosingSigs: closingSigs,
 		}},
 	}}
@@ -862,18 +864,11 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
 	// To start, we'll perform some basic validation of the sig
 	// message they've sent. We'll validate that the remote party
 	// actually has enough fees to pay the closing fees.
-	switch {
-	case !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis):
+	if !l.RemoteCanPayFees(msg.SigMsg.FeeSatoshis) {
 		return nil, fmt.Errorf("%w: %v vs %v",
 			ErrRemoteCannotPay,
 			msg.SigMsg.FeeSatoshis,
 			l.RemoteBalance.ToSatoshis())
-
-	// The sequence they send can't be the max sequence, as that would
-	// prevent RBF.
-	case msg.SigMsg.Sequence > mempool.MaxRBFSequence:
-		return nil, fmt.Errorf("%w: %v", ErrNonFinalSequence,
-			msg.SigMsg.Sequence)
 	}
 
 	// With the basic sanity checks out of the way, we'll now
@@ -908,7 +903,7 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
 	}
 
 	chanOpts := []lnwallet.ChanCloseOpt{
-		lnwallet.WithCustomSequence(msg.SigMsg.Sequence),
+		lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
 	}
 
 	chancloserLog.Infof("responding to close w/ local_addr=%x, "+
diff --git a/lnwire/closing_complete.go b/lnwire/closing_complete.go
index d33abf6725..c16760a4e5 100644
--- a/lnwire/closing_complete.go
+++ b/lnwire/closing_complete.go
@@ -34,9 +34,9 @@ type ClosingComplete struct {
 	// channel would like to propose for the close transaction.
 	FeeSatoshis btcutil.Amount
 
-	// Sequence is the sequence number to be used in the input spending the
-	// funding transaction.
-	Sequence uint32
+	// LockTime is the absolute locktime to be used in the transaction
+	// spending the funding output.
+	LockTime uint32
 
 	// ClosingSigs houses the 3 possible signatures that can be sent.
 	ClosingSigs
@@ -79,7 +79,7 @@ func decodeClosingSigs(c *ClosingSigs, tlvRecords ExtraOpaqueData) error {
 // passed io.Reader.
func (c *ClosingComplete) Decode(r io.Reader, _ uint32) error {
 	// First, read out all the fields that are hard coded into the message.
- err := ReadElements(r, &c.ChannelID, &c.FeeSatoshis, &c.Sequence) + err := ReadElements(r, &c.ChannelID, &c.FeeSatoshis, &c.LockTime) if err != nil { return err } @@ -129,7 +129,7 @@ func (c *ClosingComplete) Encode(w *bytes.Buffer, _ uint32) error { return err } - if err := WriteUint32(w, c.Sequence); err != nil { + if err := WriteUint32(w, c.LockTime); err != nil { return err } diff --git a/lnwire/lnwire_test.go b/lnwire/lnwire_test.go index 6bfbb465ec..f42426ac57 100644 --- a/lnwire/lnwire_test.go +++ b/lnwire/lnwire_test.go @@ -1352,7 +1352,7 @@ func TestLightningWireProtocol(t *testing.T) { req := ClosingComplete{ ChannelID: ChannelID(c), FeeSatoshis: btcutil.Amount(r.Int63()), - Sequence: uint32(r.Int63()), + LockTime: uint32(r.Int63()), ClosingSigs: ClosingSigs{}, } diff --git a/peer/brontide.go b/peer/brontide.go index a41b5080cb..f0399b4e8a 100644 --- a/peer/brontide.go +++ b/peer/brontide.go @@ -2275,6 +2275,13 @@ func messageSummary(msg lnwire.Message) string { return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID, msg.Address[:]) + case *lnwire.ClosingComplete: + return fmt.Sprintf("chan_id=%v, fee_sat=%v, locktime=%v", + msg.ChannelID, msg.FeeSatoshis, msg.LockTime) + + case *lnwire.ClosingSig: + return fmt.Sprintf("chan_id=%v", msg.ChannelID) + case *lnwire.ClosingSigned: return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID, msg.FeeSatoshis) From ab4297e1272492a2b399e2908ac47406817296c6 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 21 Nov 2024 20:05:12 -0800 Subject: [PATCH 08/13] lnwallet/chancloser: use block height as lock time for rbf-coop --- lnwallet/chancloser/rbf_coop_states.go | 3 +++ lnwallet/chancloser/rbf_coop_transitions.go | 4 +--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go index 613e7c690d..40f738ab34 100644 --- a/lnwallet/chancloser/rbf_coop_states.go +++ b/lnwallet/chancloser/rbf_coop_states.go @@ -294,6 +294,9 @@ type Environment struct { // ChanType is the type of channel we're attempting to close. ChanType channeldb.ChannelType + // BlockHeight is the current block height. + BlockHeight uint32 + // DefaultFeeRate is the fee we'll use for the closing transaction if // the user didn't specify an ideal fee rate. This may happen if the // remote party is the one that initiates the co-op close. 
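A note on the wiring this patch implies: whatever constructs the Environment
is now expected to seed BlockHeight with the current chain tip, which the
next diff then threads into the ClosingComplete message as its lock time. A
minimal sketch of one way a caller could do this, assuming a
lnwallet.BlockChainIO handle (the chainIO and chanID names are illustrative,
not part of this patch):

	// Query the current best height, then seed the closer's
	// environment so the co-op close tx can use it as nLockTime.
	_, bestHeight, err := chainIO.GetBestBlock()
	if err != nil {
		return err
	}
	env := &chancloser.Environment{
		ChanID:      chanID,
		BlockHeight: uint32(bestHeight),
		// Remaining fields elided for brevity.
	}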
diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go
index de7a09dd9c..0b57463c41 100644
--- a/lnwallet/chancloser/rbf_coop_transitions.go
+++ b/lnwallet/chancloser/rbf_coop_transitions.go
@@ -716,9 +716,7 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
 		Msgs: []lnwire.Message{&lnwire.ClosingComplete{
 			ChannelID:   env.ChanID,
 			FeeSatoshis: absoluteFee,
-			// TODO(roasbeef): thread thru proper height
-			// value
-			LockTime:    mempool.MaxRBFSequence,
+			LockTime:    env.BlockHeight,
 			ClosingSigs: closingSigs,
 		}},
 	}}

From b94ce6fa088f1f788b7343867229f8299cab32be Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Tue, 24 Sep 2024 16:46:28 +0900
Subject: [PATCH 09/13] lnwallet: use custom LockTime for rbf coop close

---
 lnwallet/chancloser/rbf_coop_test.go        |  2 +-
 lnwallet/chancloser/rbf_coop_transitions.go |  1 +
 lnwallet/channel.go                         | 40 +++++++++++++++++++--
 3 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go
index 5cf985e8db..6b6f985fe1 100644
--- a/lnwallet/chancloser/rbf_coop_test.go
+++ b/lnwallet/chancloser/rbf_coop_test.go
@@ -1534,7 +1534,7 @@ func TestRbfCloseClosingNegotiationRemote(t *testing.T) {
 		feeOffer := &OfferReceivedEvent{
 			SigMsg: lnwire.ClosingComplete{
 				FeeSatoshis: absoluteFee,
-				LockTime:    10,
+				LockTime:    1,
 				ClosingSigs: lnwire.ClosingSigs{
 					CloserAndClosee: newSigTlv[tlv.TlvType3]( //nolint:ll
 						remoteWireSig,
 					),
diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go
index 0b57463c41..fc54c9b1ce 100644
--- a/lnwallet/chancloser/rbf_coop_transitions.go
+++ b/lnwallet/chancloser/rbf_coop_transitions.go
@@ -902,6 +902,7 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment,
 
 	chanOpts := []lnwallet.ChanCloseOpt{
 		lnwallet.WithCustomSequence(mempool.MaxRBFSequence),
+		lnwallet.WithCustomLockTime(msg.SigMsg.LockTime),
 	}
 
 	chancloserLog.Infof("responding to close w/ local_addr=%x, "+
diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index 8e1aed81b0..a1bc7cda2a 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -8201,6 +8201,8 @@ type chanCloseOpt struct {
 	customSort CloseSortFunc
 
 	customSequence fn.Option[uint32]
+
+	customLockTime fn.Option[uint32]
 }
 
 // ChanCloseOpt is a closure type that cen be used to modify the set of default
@@ -8234,7 +8236,7 @@ func WithExtraCloseOutputs(extraOutputs []CloseOutput) ChanCloseOpt {
 func WithCustomCoopSort(sorter CloseSortFunc) ChanCloseOpt {
 	return func(opts *chanCloseOpt) {
 		opts.customSort = sorter
-	}
+	}
 }
 
 // WithCustomSequence can be used to specify a custom sequence number for the
@@ -8245,6 +8247,14 @@ func WithCustomSequence(sequence uint32) ChanCloseOpt {
 	return func(opts *chanCloseOpt) {
 		opts.customSequence = fn.Some(sequence)
 	}
 }
 
+// WithCustomLockTime can be used to specify a custom lock time for the coop
+// close transaction.
+func WithCustomLockTime(lockTime uint32) ChanCloseOpt {
+	return func(opts *chanCloseOpt) {
+		opts.customLockTime = fn.Some(lockTime)
+	}
+}
+
 // CreateCloseProposal is used by both parties in a cooperative channel close
 // workflow to generate proposed close transactions and signatures. 
This method
// should only be executed once all pending HTLCs (if any) on the channel have
@@ -8310,6 +8320,12 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 		))
 	})
 
+	opts.customLockTime.WhenSome(func(lockTime uint32) {
+		closeTxOpts = append(closeTxOpts, WithCustomTxLockTime(
+			lockTime,
+		))
+	})
+
 	closeTx, err := CreateCooperativeCloseTx(
 		fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
 		lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
@@ -8417,6 +8433,12 @@ func (lc *LightningChannel) CompleteCooperativeClose(
 		))
 	})
 
+	opts.customLockTime.WhenSome(func(lockTime uint32) {
+		closeTxOpts = append(closeTxOpts, WithCustomTxLockTime(
+			lockTime,
+		))
+	})
+
 	// Create the transaction used to return the current settled balance
 	// on this active channel back to both parties. In this current model,
 	// the initiator pays full fees for the cooperative close transaction.
@@ -9138,6 +9160,8 @@ type closeTxOpts struct {
 	// close transaction. This gives slightly more control compared to the
 	// enableRBF option.
 	customSequence fn.Option[uint32]
+
+	customLockTime fn.Option[uint32]
 }
 
 // defaultCloseTxOpts returns a closeTxOpts struct with default values.
@@ -9171,7 +9195,7 @@ func WithExtraTxCloseOutputs(extraOutputs []CloseOutput) CloseTxOpt {
 func WithCustomTxSort(sorter CloseSortFunc) CloseTxOpt {
 	return func(opts *closeTxOpts) {
 		opts.customSort = sorter
-	}
+	}
 }
 
 // WithCustomTxInSequence allows a caller to set a custom sequence on the sole
@@ -9182,6 +9206,14 @@ func WithCustomTxInSequence(sequence uint32) CloseTxOpt {
 	return func(o *closeTxOpts) {
 		o.customSequence = fn.Some(sequence)
 	}
 }
 
+// WithCustomTxLockTime allows a caller to set a custom lock time on the
+// close transaction.
+func WithCustomTxLockTime(lockTime uint32) CloseTxOpt {
+	return func(o *closeTxOpts) {
+		o.customLockTime = fn.Some(lockTime)
+	}
+}
+
 // CreateCooperativeCloseTx creates a transaction which if signed by both
 // parties, then broadcast cooperatively closes an active channel. 
The creation
// of the closure transaction is modified by a boolean indicating if the party
@@ -9216,6 +9246,10 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn,
 	closeTx := wire.NewMsgTx(2)
 	closeTx.AddTxIn(&fundingTxIn)
 
+	opts.customLockTime.WhenSome(func(lockTime uint32) {
+		closeTx.LockTime = lockTime
+	})
+
 	// TODO(roasbeef): needs support for dropping inputs
 
 	// Create both cooperative closure outputs, properly respecting the

From b8cf5ae98f87dbf0e31590817668c0c3a3ed31f7 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Wed, 25 Sep 2024 14:46:18 +0900
Subject: [PATCH 10/13] lnwallet: for rbf coop close, log the close tx

---
 lnwallet/chancloser/chancloser_test.go |  2 +-
 lnwallet/chancloser/interface.go       |  3 +--
 lnwallet/chancloser/mock.go            |  4 ++--
 lnwallet/chancloser/rbf_coop_states.go |  3 +--
 lnwallet/chancloser/rbf_coop_test.go   | 12 ++----------
 lnwallet/channel.go                    |  5 ++---
 6 files changed, 9 insertions(+), 20 deletions(-)

diff --git a/lnwallet/chancloser/chancloser_test.go b/lnwallet/chancloser/chancloser_test.go
index d0211414c5..f7bdc74b4f 100644
--- a/lnwallet/chancloser/chancloser_test.go
+++ b/lnwallet/chancloser/chancloser_test.go
@@ -189,7 +189,7 @@ func (m *mockChannel) RemoteUpfrontShutdownScript() lnwire.DeliveryAddress {
 
 func (m *mockChannel) CreateCloseProposal(fee btcutil.Amount,
 	localScript, remoteScript []byte,
-	_ ...lnwallet.ChanCloseOpt) (input.Signature, *chainhash.Hash,
+	_ ...lnwallet.ChanCloseOpt) (input.Signature, *wire.MsgTx,
 	btcutil.Amount, error) {
 
 	if m.chanType.IsTaproot() {
diff --git a/lnwallet/chancloser/interface.go b/lnwallet/chancloser/interface.go
index f774c81039..74f0969737 100644
--- a/lnwallet/chancloser/interface.go
+++ b/lnwallet/chancloser/interface.go
@@ -3,7 +3,6 @@ package chancloser
 import (
 	"github.com/btcsuite/btcd/btcec/v2/schnorr/musig2"
 	"github.com/btcsuite/btcd/btcutil"
-	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/fn/v2"
@@ -100,7 +99,7 @@ type Channel interface { //nolint:interfacebloat
 		localDeliveryScript []byte, remoteDeliveryScript []byte,
 		closeOpt ...lnwallet.ChanCloseOpt,
 	) (
-		input.Signature, *chainhash.Hash, btcutil.Amount, error)
+		input.Signature, *wire.MsgTx, btcutil.Amount, error)
 
 	// CompleteCooperativeClose persistently "completes" the cooperative
 	// close by producing a fully signed co-op close transaction. 
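The return-type widening above is what makes the new logging possible: the
caller gets the full transaction rather than just its hash, and the txid
remains derivable on demand. A rough sketch of the call-site pattern this
enables (the channel, fee, and script variables are illustrative):

	sig, closeTx, ourBalance, err := channel.CreateCloseProposal(
		proposedFee, localScript, remoteScript,
	)
	if err != nil {
		return err
	}

	// The txid can still be recovered from the transaction itself,
	// while the full tx is now available for debug logging.
	closeTXID := closeTx.TxHash()
	chancloserLog.Debugf("created close proposal txid=%v, tx=%v",
		closeTXID, lnutils.SpewLogClosure(closeTx))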
diff --git a/lnwallet/chancloser/mock.go b/lnwallet/chancloser/mock.go
index c6ea4fba21..a2cc4270c1 100644
--- a/lnwallet/chancloser/mock.go
+++ b/lnwallet/chancloser/mock.go
@@ -153,11 +153,11 @@ type mockCloseSigner struct {
 
 func (m *mockCloseSigner) CreateCloseProposal(fee btcutil.Amount,
 	localScript []byte, remoteScript []byte,
 	closeOpt ...lnwallet.ChanCloseOpt) (
-	input.Signature, *chainhash.Hash, btcutil.Amount, error) {
+	input.Signature, *wire.MsgTx, btcutil.Amount, error) {
 
 	args := m.Called(fee, localScript, remoteScript, closeOpt)
 
-	return args.Get(0).(input.Signature), args.Get(1).(*chainhash.Hash),
+	return args.Get(0).(input.Signature), args.Get(1).(*wire.MsgTx),
 		args.Get(2).(btcutil.Amount), args.Error(3)
 }
 
diff --git a/lnwallet/chancloser/rbf_coop_states.go b/lnwallet/chancloser/rbf_coop_states.go
index 40f738ab34..d5c2070660 100644
--- a/lnwallet/chancloser/rbf_coop_states.go
+++ b/lnwallet/chancloser/rbf_coop_states.go
@@ -6,7 +6,6 @@ import (
 	"github.com/btcsuite/btcd/btcec/v2"
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/chaincfg"
-	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/channeldb"
@@ -223,7 +222,7 @@ type CloseSigner interface {
 		localDeliveryScript []byte, remoteDeliveryScript []byte,
 		closeOpt ...lnwallet.ChanCloseOpt,
 	) (
-		input.Signature, *chainhash.Hash, btcutil.Amount, error)
+		input.Signature, *wire.MsgTx, btcutil.Amount, error)
 
 	// CompleteCooperativeClose persistently "completes" the cooperative
 	// close by producing a fully signed co-op close transaction.
diff --git a/lnwallet/chancloser/rbf_coop_test.go b/lnwallet/chancloser/rbf_coop_test.go
index 6b6f985fe1..07bc32e3dd 100644
--- a/lnwallet/chancloser/rbf_coop_test.go
+++ b/lnwallet/chancloser/rbf_coop_test.go
@@ -13,7 +13,6 @@ import (
 	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/chaincfg"
-	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/mempool"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
@@ -52,18 +51,11 @@ var (
 	remoteSig     = sigMustParse(remoteSigBytes)
 	remoteWireSig = mustWireSig(&remoteSig)
 
-	localTxid  = newChainHash(bytes.Repeat([]byte{0x01}, 32))
-	remoteTxid = newChainHash(bytes.Repeat([]byte{0x02}, 32))
+	localTx = wire.MsgTx{Version: 2}
 
 	closeTx = wire.NewMsgTx(2)
 )
 
-func newChainHash(b []byte) chainhash.Hash {
-	var h chainhash.Hash
-	copy(h[:], b)
-	return h
-}
-
 func sigMustParse(sigBytes []byte) ecdsa.Signature {
 	sig, err := ecdsa.ParseSignature(sigBytes)
 	if err != nil {
@@ -386,7 +378,7 @@ func (r *rbfCloserTestHarness) expectNewCloseSig(
 	r.signer.On(
 		"CreateCloseProposal", fee, localScript, remoteScript,
 		mock.Anything,
-	).Return(&localSig, &localTxid, closeBalance, nil)
+	).Return(&localSig, &localTx, closeBalance, nil)
 }
 
 func (r *rbfCloserTestHarness) waitForMsgSent() {
diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index a1bc7cda2a..a74d6ab3ab 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -8264,7 +8264,7 @@ func WithCustomLockTime(lockTime uint32) ChanCloseOpt {
 // returned.
 func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 	localDeliveryScript []byte, remoteDeliveryScript []byte,
-	closeOpts ...ChanCloseOpt) (input.Signature, *chainhash.Hash,
+	closeOpts ...ChanCloseOpt) (input.Signature, *wire.MsgTx,
 	btcutil.Amount, error) {
 
 	lc.Lock()
@@ -8364,8 +8364,7 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 		}
 	}
 
-	closeTXID := closeTx.TxHash()
-	return sig, &closeTXID, ourBalance, nil
+	return sig, closeTx, ourBalance, nil
 }
 
 // CompleteCooperativeClose completes the cooperative closure of the target

From d1b2bff2c8cfd0939775fbd6af324dab4a320da9 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Fri, 22 Nov 2024 19:05:18 -0800
Subject: [PATCH 11/13] lnwallet: update CoopCloseBalance to allow a paying party

This preps us for an upcoming change to the rbf coop state machine
where either party can pay the closing fees. We also add a new test to
make sure the new function adheres to some key properties.
---
 lnwallet/channel.go    |   2 +
 lnwallet/close_test.go | 241 +++++++++++++++++++++++++++++++++++++++++
 lnwallet/commitment.go |  31 +++++-
 3 files changed, 268 insertions(+), 6 deletions(-)
 create mode 100644 lnwallet/close_test.go

diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index a74d6ab3ab..61c266285b 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -8289,6 +8289,7 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 		lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.CommitFee,
+		fn.None[lntypes.ChannelParty](),
 	)
 	if err != nil {
 		return nil, nil, 0, err
@@ -8403,6 +8403,7 @@ func (lc *LightningChannel) CompleteCooperativeClose(
 		lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.CommitFee,
+		fn.None[lntypes.ChannelParty](),
 	)
 	if err != nil {
 		return nil, 0, err
diff --git a/lnwallet/close_test.go b/lnwallet/close_test.go
new file mode 100644
index 0000000000..7ece3fc458
--- /dev/null
+++ b/lnwallet/close_test.go
@@ -0,0 +1,241 @@
+package lnwallet
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/btcsuite/btcd/btcutil"
+	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/fn/v2"
+	"github.com/lightningnetwork/lnd/lntypes"
+	"github.com/stretchr/testify/require"
+	"pgregory.net/rapid"
+)
+
+// genValidAmount generates valid bitcoin amounts (always positive).
+func genValidAmount(t *rapid.T, label string) btcutil.Amount {
+	return btcutil.Amount(
+		rapid.Int64Range(
+			100_000, 21_000_000*100_000_000,
+		).Draw(t, label),
+	)
+}
+
+// genCoopCloseFee generates a reasonable non-zero cooperative close fee. 
+func genCoopCloseFee(t *rapid.T) btcutil.Amount {
+	// Generate a fee between 250-10,000 sats, which is a reasonable range
+	// for closing transactions.
+	return btcutil.Amount(
+		rapid.Int64Range(250, 10_000).Draw(t, "coop_close_fee"),
+	)
+}
+
+// genChannelType generates various channel types, ensuring good coverage of
+// different channel configurations including anchor outputs and other features.
+func genChannelType(t *rapid.T) channeldb.ChannelType {
+	var chanType channeldb.ChannelType
+
+	// For each bit, decide randomly if it should be set.
+	bits := []channeldb.ChannelType{
+		channeldb.DualFunderBit,
+		channeldb.SingleFunderTweaklessBit,
+		channeldb.NoFundingTxBit,
+		channeldb.AnchorOutputsBit,
+		channeldb.FrozenBit,
+		channeldb.ZeroHtlcTxFeeBit,
+		channeldb.LeaseExpirationBit,
+		channeldb.ZeroConfBit,
+		channeldb.ScidAliasChanBit,
+		channeldb.ScidAliasFeatureBit,
+		channeldb.SimpleTaprootFeatureBit,
+		channeldb.TapscriptRootBit,
+	}
+
+	// Helper to bias towards setting specific bits more frequently.
+	setBit := func(bit channeldb.ChannelType, probability int) {
+		bitRange := rapid.IntRange(0, 100)
+		label := "bit_" + strconv.FormatUint(uint64(bit), 2)
+		if bitRange.Draw(t, label) < probability {
+			chanType |= bit
+		}
+	}
+
+	// We want to ensure good coverage of anchor outputs since they affect
+	// the balance calculation directly. We'll set the anchor bit with a 50%
+	// chance.
+	setBit(channeldb.AnchorOutputsBit, 50)
+
+	// For other bits, use varying probabilities to ensure good
+	// distribution.
+	for _, bit := range bits {
+		// The anchor bit was already set above so we can skip it here.
+		if bit == channeldb.AnchorOutputsBit {
+			continue
+		}
+
+		// Some bits are related, so we'll make sure we capture that
+		// dependency.
+		switch bit {
+		case channeldb.TapscriptRootBit:
+			// If we have TapscriptRootBit, we must have
+			// SimpleTaprootFeatureBit.
+			if chanType&channeldb.SimpleTaprootFeatureBit != 0 {
+				// 70% chance if taproot is enabled.
+				setBit(bit, 70)
+			}
+
+		case channeldb.DualFunderBit:
+			// 40% chance of dual funding.
+			setBit(bit, 40)
+
+		default:
+			// 30% chance for other bits.
+			setBit(bit, 30)
+		}
+	}
+
+	return chanType
+}
+
+// genFeePayer generates an optional fee payer.
+func genFeePayer(t *rapid.T) fn.Option[lntypes.ChannelParty] {
+	if !rapid.Bool().Draw(t, "has_fee_payer") {
+		return fn.None[lntypes.ChannelParty]()
+	}
+
+	if rapid.Bool().Draw(t, "is_local") {
+		return fn.Some(lntypes.Local)
+	}
+
+	return fn.Some(lntypes.Remote)
+}
+
+// genCommitFee generates a reasonable non-zero commitment fee.
+func genCommitFee(t *rapid.T) btcutil.Amount {
+	// Generate a reasonable commit fee between 100-5,000 sats.
+	return btcutil.Amount(
+		rapid.Int64Range(100, 5_000).Draw(t, "commit_fee"),
+	)
+}
+
+// TestCoopCloseBalance tests fundamental properties of CoopCloseBalance. This
+// ensures that the closing fee is always subtracted from the correct balance,
+// amongst other properties. 
+func TestCoopCloseBalance(tt *testing.T) {
+	tt.Parallel()
+
+	rapid.Check(tt, func(t *rapid.T) {
+		require := require.New(t)
+
+		// Generate the test inputs.
+		chanType := genChannelType(t)
+		isInitiator := rapid.Bool().Draw(t, "is_initiator")
+
+		// Generate amounts using specific generators.
+		coopCloseFee := genCoopCloseFee(t)
+		ourBalance := genValidAmount(t, "local balance")
+		theirBalance := genValidAmount(t, "remote balance")
+		feePayer := genFeePayer(t)
+		commitFee := genCommitFee(t)
+
+		ourFinal, theirFinal, err := CoopCloseBalance(
+			chanType, isInitiator, coopCloseFee,
+			ourBalance, theirBalance, commitFee, feePayer,
+		)
+
+		// Property 1: If inputs are non-negative, we either get valid
+		// outputs or an error.
+		if err != nil {
+			// On error, the final balances should be 0.
+			require.Zero(
+				ourFinal,
+				"expected zero our_balance on error",
+			)
+			require.Zero(
+				theirFinal,
+				"expected zero their_balance on error",
+			)
+
+			return
+		}
+
+		// Property 2: Final balances should be non-negative.
+		require.GreaterOrEqual(
+			ourFinal, btcutil.Amount(0),
+			"our final balance should be non-negative",
+		)
+		require.GreaterOrEqual(
+			theirFinal, btcutil.Amount(0),
+			"their final balance should be non-negative",
+		)
+
+		// Property 3: Total balance should be conserved minus fees.
+		initialTotal := ourBalance + theirBalance
+		initialTotal += commitFee
+
+		if chanType.HasAnchors() {
+			initialTotal += 2 * AnchorSize
+		}
+
+		finalTotal := ourFinal + theirFinal + coopCloseFee
+		require.Equal(
+			initialTotal, finalTotal,
+			"total balance should be conserved",
+		)
+
+		// Property 4: When feePayer is specified, that party's balance
+		// should be reduced by no more than the coopCloseFee.
+		if feePayer.IsSome() {
+			payer := feePayer.UnwrapOrFail(tt)
+
+			if payer == lntypes.Local {
+				require.LessOrEqual(
+					ourBalance-(ourFinal+coopCloseFee),
+					btcutil.Amount(0),
+					"local balance reduced by more than fee", //nolint:ll
+				)
+			} else {
+				require.LessOrEqual(
+					theirBalance-(theirFinal+coopCloseFee),
+					btcutil.Amount(0),
+					"remote balance reduced by more than fee", //nolint:ll
+				)
+			}
+		}
+
+		// Property 5: For anchor channels, verify the correct final
+		// balance factors in the anchor amount.
+		if chanType.HasAnchors() {
+			// The initiator delta is the commit fee plus anchor
+			// amount.
+			initiatorDelta := commitFee + 2*AnchorSize
+
+			// Default to initiator paying unless explicitly
+			// specified.
+			isLocalPaying := isInitiator
+			if feePayer.IsSome() {
+				isLocalPaying = feePayer.UnwrapOrFail(tt) ==
+					lntypes.Local
+			}
+
+			if isInitiator {
+				expectedBalance := ourBalance + initiatorDelta
+				if isLocalPaying {
+					expectedBalance -= coopCloseFee
+				}
+
+				require.Equal(expectedBalance, ourFinal,
+					"initiator (local) balance incorrect")
+			} else {
+				// They are the initiator.
+				expectedBalance := theirBalance + initiatorDelta
+				if !isLocalPaying {
+					expectedBalance -= coopCloseFee
+				}
+
+				require.Equal(expectedBalance, theirFinal,
+					"initiator (remote) balance incorrect")
+			}
+		}
+	})
+}
diff --git a/lnwallet/commitment.go b/lnwallet/commitment.go
index 787e8a71e1..ab20d9afaa 100644
--- a/lnwallet/commitment.go
+++ b/lnwallet/commitment.go
@@ -1033,8 +1033,9 @@ func CreateCommitTx(chanType channeldb.ChannelType,
 
 // CoopCloseBalance returns the final balances that should be used to create
 // the cooperative close tx, given the channel type and transaction fee. 
 func CoopCloseBalance(chanType channeldb.ChannelType, isInitiator bool,
-	coopCloseFee, ourBalance, theirBalance,
-	commitFee btcutil.Amount) (btcutil.Amount, btcutil.Amount, error) {
+	coopCloseFee, ourBalance, theirBalance, commitFee btcutil.Amount,
+	feePayer fn.Option[lntypes.ChannelParty],
+) (btcutil.Amount, btcutil.Amount, error) {
 
 	// We'll make sure we account for the complete balance by adding the
 	// current dangling commitment fee to the balance of the initiator.
@@ -1046,16 +1047,34 @@ func CoopCloseBalance(chanType channeldb.ChannelType, isInitiator bool,
 		initiatorDelta += 2 * AnchorSize
 	}
 
-	// The initiator will pay the full coop close fee, subtract that value
-	// from their balance.
-	initiatorDelta -= coopCloseFee
-
+	// To start with, we'll add the anchor and/or commitment fee to the
+	// balance of the initiator.
 	if isInitiator {
 		ourBalance += initiatorDelta
 	} else {
 		theirBalance += initiatorDelta
 	}
 
+	// With the initiator's balance credited, we'll now subtract the closing
+	// fee from the paying party. By default, the initiator pays the full
+	// amount, but this can be overridden by the feePayer option.
+	defaultPayer := func() lntypes.ChannelParty {
+		if isInitiator {
+			return lntypes.Local
+		}
+
+		return lntypes.Remote
+	}()
+	payer := feePayer.UnwrapOr(defaultPayer)
+
+	// Based on the payer computed above, we'll subtract the closing fee.
+	switch payer {
+	case lntypes.Local:
+		ourBalance -= coopCloseFee
+	case lntypes.Remote:
+		theirBalance -= coopCloseFee
+	}
+
 	// During fee negotiation it should always be verified that the
 	// initiator can pay the proposed fee, but we do a sanity check just to
 	// be sure here.

From d38c5e6222c3f2ec45ffb97e3ffe5e9a8e6eb108 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Sun, 24 Nov 2024 14:08:19 -0800
Subject: [PATCH 12/13] lnwallet: update core coop close logic with custom payer

In this commit, we update the core coop close logic with the new custom
payer param. We also expand the existing unit tests to ensure that the
fee is deducted from the proper party.
---
 lnwallet/channel.go      |  38 ++++++++----
 lnwallet/channel_test.go | 121 ++++++++++++++++++++++++++-------
 2 files changed, 127 insertions(+), 32 deletions(-)

diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index 61c266285b..dc45a12bed 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -8203,6 +8203,8 @@ type chanCloseOpt struct {
 	customSequence fn.Option[uint32]
 
 	customLockTime fn.Option[uint32]
+
+	customPayer fn.Option[lntypes.ChannelParty]
 }
 
 // ChanCloseOpt is a closure type that cen be used to modify the set of default
@@ -8255,6 +8257,15 @@ func WithCustomLockTime(lockTime uint32) ChanCloseOpt {
 	}
 }
 
+// WithCustomPayer can be used to specify a custom payer for the closing
+// transaction. This overrides the default payer, which is the initiator of the
+// channel.
+func WithCustomPayer(payer lntypes.ChannelParty) ChanCloseOpt {
+	return func(opts *chanCloseOpt) {
+		opts.customPayer = fn.Some(payer)
+	}
+}
+
 // CreateCloseProposal is used by both parties in a cooperative channel close
 // workflow to generate proposed close transactions and signatures. This method
 // should only be executed once all pending HTLCs (if any) on the channel have
@@ -8270,16 +8281,17 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 	lc.Lock()
 	defer lc.Unlock()
 
-	// If we're already closing the channel, then ignore this request. 
-	if lc.isClosed {
-		return nil, nil, 0, ErrChanClosing
-	}
-
 	opts := defaultCloseOpts()
 	for _, optFunc := range closeOpts {
 		optFunc(opts)
 	}
 
+	// Unless there's a custom payer (a sign of the RBF flow), if we're
+	// already closing the channel, then ignore this request.
+	if lc.isClosed && opts.customPayer.IsNone() {
+		return nil, nil, 0, ErrChanClosing
+	}
+
 	// Get the final balances after subtracting the proposed fee, taking
 	// care not to persist the adjusted balance, as the feeRate may change
 	// during the channel closing process.
@@ -8289,7 +8301,7 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount,
 		lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.CommitFee,
-		fn.None[lntypes.ChannelParty](),
+		opts.customPayer,
 	)
 	if err != nil {
 		return nil, nil, 0, err
@@ -8385,17 +8397,17 @@ func (lc *LightningChannel) CompleteCooperativeClose(
 	lc.Lock()
 	defer lc.Unlock()
 
-	// If the channel is already closing, then ignore this request.
-	if lc.isClosed {
-		// TODO(roasbeef): check to ensure no pending payments
-		return nil, 0, ErrChanClosing
-	}
-
 	opts := defaultCloseOpts()
 	for _, optFunc := range closeOpts {
 		optFunc(opts)
 	}
 
+	// Unless there's a custom payer (a sign of the RBF flow), if we're
+	// already closing the channel, then ignore this request.
+	if lc.isClosed && opts.customPayer.IsNone() {
+		return nil, 0, ErrChanClosing
+	}
+
 	// Get the final balances after subtracting the proposed fee.
 	ourBalance, theirBalance, err := CoopCloseBalance(
 		lc.channelState.ChanType, lc.channelState.IsInitiator,
@@ -8403,7 +8415,7 @@ func (lc *LightningChannel) CompleteCooperativeClose(
 		lc.channelState.LocalCommitment.LocalBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.RemoteBalance.ToSatoshis(),
 		lc.channelState.LocalCommitment.CommitFee,
-		fn.None[lntypes.ChannelParty](),
+		opts.customPayer,
 	)
 	if err != nil {
 		return nil, 0, err
diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go
index d0caa97812..9da174a3d8 100644
--- a/lnwallet/channel_test.go
+++ b/lnwallet/channel_test.go
@@ -770,29 +770,66 @@ func TestCommitHTLCSigCustomRecordSize(t *testing.T) {
 }
 
 // TestCooperativeChannelClosure checks that the coop close process finishes
 // with an agreement from both parties, and that the final balances of the
 // close tx check out.
 func TestCooperativeChannelClosure(t *testing.T) {
-	t.Run("tweakless", func(t *testing.T) {
-		testCoopClose(t, &coopCloseTestCase{
-			chanType: channeldb.SingleFunderTweaklessBit,
-		})
-	})
-	t.Run("anchors", func(t *testing.T) {
-		testCoopClose(t, &coopCloseTestCase{
-			chanType: channeldb.SingleFunderTweaklessBit |
-				channeldb.AnchorOutputsBit,
-			anchorAmt: AnchorSize * 2,
+	testCases := []struct {
+		name      string
+		closeCase coopCloseTestCase
+	}{
+		{
+			name: "tweakless",
+			closeCase: coopCloseTestCase{
+				chanType: channeldb.SingleFunderTweaklessBit,
+			},
+		},
+		{
+			name: "anchors",
+			closeCase: coopCloseTestCase{
+				chanType: channeldb.SingleFunderTweaklessBit |
+					channeldb.AnchorOutputsBit,
+				anchorAmt: AnchorSize * 2,
+			},
+		},
+		{
+			name: "anchors local pay",
+			closeCase: coopCloseTestCase{
+				chanType: channeldb.SingleFunderTweaklessBit |
+					channeldb.AnchorOutputsBit,
+				anchorAmt:   AnchorSize * 2,
+				customPayer: fn.Some(lntypes.Local),
+			},
+		},
+		{
+			name: "anchors remote pay",
+			closeCase: coopCloseTestCase{
+				chanType: channeldb.SingleFunderTweaklessBit |
+					channeldb.AnchorOutputsBit,
+				anchorAmt:   AnchorSize * 2,
+				customPayer: fn.Some(lntypes.Remote),
+			},
+		},
+	}
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			testCoopClose(t, testCase.closeCase)
 		})
-	})
+	}
 }
 
 type coopCloseTestCase struct {
 	chanType  channeldb.ChannelType
 	anchorAmt btcutil.Amount
+
+	customPayer fn.Option[lntypes.ChannelParty]
+}
+
+type closeOpts struct {
+	aliceOpts []ChanCloseOpt
+	bobOpts   []ChanCloseOpt
 }
 
-func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
+func testCoopClose(t *testing.T, testCase coopCloseTestCase) {
 	t.Parallel()
 
 	// Create a test channel which will be used for the duration of this
@@ -813,17 +850,38 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 		bobChannel.channelState.LocalCommitment.FeePerKw,
 	)
 
+	customPayer := testCase.customPayer
+
+	closeOpts := fn.MapOptionZ(
+		customPayer, func(payer lntypes.ChannelParty) closeOpts {
+			// If the local party is paying, then from Alice's PoV
+			// it's the local party paying, while from Bob's PoV
+			// it's the remote party. If the remote party is
+			// paying, then the opposite holds.
+			return closeOpts{
+				aliceOpts: []ChanCloseOpt{
+					WithCustomPayer(payer),
+				},
+				bobOpts: []ChanCloseOpt{
+					WithCustomPayer(payer.CounterParty()),
+				},
+			}
+		},
+	)
+
 	// We'll start with both Alice and Bob creating a new close proposal
 	// with the same fee.
 	aliceFee := aliceChannel.CalcFee(aliceFeeRate)
 	aliceSig, _, _, err := aliceChannel.CreateCloseProposal(
 		aliceFee, aliceDeliveryScript, bobDeliveryScript,
+		closeOpts.aliceOpts...,
 	)
 	require.NoError(t, err, "unable to create alice coop close proposal")
 
 	bobFee := bobChannel.CalcFee(bobFeeRate)
 	bobSig, _, _, err := bobChannel.CreateCloseProposal(
 		bobFee, bobDeliveryScript, aliceDeliveryScript,
+		closeOpts.bobOpts...,
 	)
 	require.NoError(t, err, "unable to create bob coop close proposal")
 
@@ -832,14 +890,14 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 	// transaction is well formed, and the signatures verify. 
 	aliceCloseTx, bobTxBalance, err := bobChannel.CompleteCooperativeClose(
 		bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript,
-		bobFee,
+		bobFee, closeOpts.bobOpts...,
 	)
 	require.NoError(t, err, "unable to complete alice cooperative close")
 	bobCloseSha := aliceCloseTx.TxHash()
 
 	bobCloseTx, aliceTxBalance, err := aliceChannel.CompleteCooperativeClose(
 		aliceSig, bobSig, aliceDeliveryScript, bobDeliveryScript,
-		aliceFee,
+		aliceFee, closeOpts.aliceOpts...,
 	)
 	require.NoError(t, err, "unable to complete bob cooperative close")
 	aliceCloseSha := bobCloseTx.TxHash()
@@ -848,18 +906,43 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 	if aliceCloseSha != bobCloseSha {
 		t.Fatalf("alice and bob close transactions don't match: %v", err)
 	}
 
-	// Finally, make sure the final balances are correct from both's
-	// perspective.
+	type chanFees struct {
+		alice btcutil.Amount
+		bob   btcutil.Amount
+	}
+
+	// Compute the closing fees for each party. If no payer is specified,
+	// Alice will always pay the fees. Otherwise, it depends on the payer.
+	closeFees := fn.MapOption(func(payer lntypes.ChannelParty) chanFees {
+		var alice, bob btcutil.Amount
+
+		switch payer {
+		case lntypes.Local:
+			alice = bobFee
+			bob = 0
+		case lntypes.Remote:
+			bob = bobFee
+			alice = 0
+		}
+
+		return chanFees{
+			alice: alice,
+			bob:   bob,
+		}
+	})(testCase.customPayer).UnwrapOr(chanFees{alice: bobFee})
+
+	// Finally, make sure the final balances are correct from both
+	// perspectives.
 	aliceBalance := aliceChannel.channelState.LocalCommitment.
 		LocalBalance.ToSatoshis()
 
-	// The commit balance have had the initiator's (Alice) commitfee and
+	// The commit balance has had the initiator's (Alice) commit fee and
 	// any anchors subtracted, so add that back to the final expected
 	// balance. Alice also pays the coop close fee, so that must be
 	// subtracted.
 	commitFee := aliceChannel.channelState.LocalCommitment.CommitFee
 	expBalanceAlice := aliceBalance + commitFee +
-		testCase.anchorAmt - bobFee
+		testCase.anchorAmt - closeFees.alice
 	if aliceTxBalance != expBalanceAlice {
 		t.Fatalf("expected balance %v got %v", expBalanceAlice,
 			aliceTxBalance)
@@ -868,7 +951,7 @@ func testCoopClose(t *testing.T, testCase *coopCloseTestCase) {
 	// Bob is not the initiator, so his final balance should simply be
 	// equal to the latest commitment balance.
 	expBalanceBob := bobChannel.channelState.LocalCommitment.
-		LocalBalance.ToSatoshis()
+		LocalBalance.ToSatoshis() - closeFees.bob
 	if bobTxBalance != expBalanceBob {
 		t.Fatalf("expected bob's balance to be %v got %v",
 			expBalanceBob, bobTxBalance)

From 3c5f96de7c409e208f16b143acca5131aafd376a Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Sun, 24 Nov 2024 14:07:13 -0800
Subject: [PATCH 13/13] lnwallet/chancloser: enable custom payer for rbf coop close

In this commit, we enable a custom payer for the rbf coop close. This
allows us to ensure that the party that started one side of the close
flow pays the fees.
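The rule encoded by the new option is that whichever party drives a given
close iteration pays for it: the local close flow passes lntypes.Local,
while the remote flow passes lntypes.Remote. At the balance level this
reduces to the following sketch (the concrete variables are illustrative):

	// With an explicit payer, CoopCloseBalance deducts the close fee
	// from that party's output rather than defaulting to the
	// channel's funding initiator.
	ourOut, theirOut, err := lnwallet.CoopCloseBalance(
		chanType, true, coopCloseFee, ourBalance, theirBalance,
		commitFee, fn.Some(lntypes.Remote),
	)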
--- lnwallet/chancloser/rbf_coop_transitions.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lnwallet/chancloser/rbf_coop_transitions.go b/lnwallet/chancloser/rbf_coop_transitions.go index 2a7d95f9a5..da45908b72 100644 --- a/lnwallet/chancloser/rbf_coop_transitions.go +++ b/lnwallet/chancloser/rbf_coop_transitions.go @@ -661,6 +661,7 @@ func (l *LocalCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, rawSig, closeTx, closeBalance, err := env.CloseSigner.CreateCloseProposal( //nolint:ll absoluteFee, localScript, l.RemoteDeliveryScript, lnwallet.WithCustomSequence(mempool.MaxRBFSequence), + lnwallet.WithCustomPayer(lntypes.Local), ) if err != nil { return nil, err @@ -808,6 +809,7 @@ func (l *LocalOfferSent) ProcessEvent(event ProtocolEvent, env *Environment, localSig, remoteSig, l.LocalDeliveryScript, l.RemoteDeliveryScript, l.ProposedFee, lnwallet.WithCustomSequence(mempool.MaxRBFSequence), + lnwallet.WithCustomPayer(lntypes.Local), ) if err != nil { return nil, err @@ -903,6 +905,7 @@ func (l *RemoteCloseStart) ProcessEvent(event ProtocolEvent, env *Environment, chanOpts := []lnwallet.ChanCloseOpt{ lnwallet.WithCustomSequence(mempool.MaxRBFSequence), lnwallet.WithCustomLockTime(msg.SigMsg.LockTime), + lnwallet.WithCustomPayer(lntypes.Remote), } chancloserLog.Infof("responding to close w/ local_addr=%x, "+