From 639e2baea16d7a87643f022a75f50446e6a56810 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 14 Nov 2025 23:13:25 +0000 Subject: [PATCH 1/7] Move unlocked part of `handle_monitor_update_completion` into an fn The `handle_monitor_update_completion` macro is called from a number of places in `channelmanager.rs` and dumps quite a bit of code into each callsite. Here we take the unlocked half of `handle_monitor_update_completion` and move it into a function in `ChannelManager`. As a result, building the `lightning` crate tests after a single-line `println` change in `channelmanager.rs` was reduced by around a full second (out of ~28.5 originally) on my machine using rustc 1.85.0. Memory usage of the `expand_crate` step went from +554MB to +548MB in the same test. --- lightning/src/ln/channelmanager.rs | 140 ++++++++++++++++------------- 1 file changed, 79 insertions(+), 61 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d84d46e8995..5b502c4245e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3531,7 +3531,7 @@ macro_rules! handle_monitor_update_completion { assert_eq!($chan.blocked_monitor_updates_pending(), 0); } let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let mut updates = $chan.monitor_updating_restored(&&logger, + let updates = $chan.monitor_updating_restored(&&logger, &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(), $self.best_block.read().unwrap().height, |htlc_id| $self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id)); @@ -3569,66 +3569,11 @@ macro_rules! handle_monitor_update_completion { core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); - // If the channel belongs to a batch funding transaction, the progress of the batch - // should be updated as we have received funding_signed and persisted the monitor. 
- if let Some(txid) = unbroadcasted_batch_funding_txid { - let mut funding_batch_states = $self.funding_batch_states.lock().unwrap(); - let mut batch_completed = false; - if let Some(batch_state) = funding_batch_states.get_mut(&txid) { - let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| ( - *chan_id == channel_id && - *pubkey == counterparty_node_id - )); - if let Some(channel_state) = channel_state { - channel_state.2 = true; - } else { - debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update"); - } - batch_completed = batch_state.iter().all(|(_, _, completed)| *completed); - } else { - debug_assert!(false, "Missing batch state for channel which completed initial monitor update"); - } - - // When all channels in a batched funding transaction have become ready, it is not necessary - // to track the progress of the batch anymore and the state of the channels can be updated. - if batch_completed { - let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); - let per_peer_state = $self.per_peer_state.read().unwrap(); - let mut batch_funding_tx = None; - for (channel_id, counterparty_node_id, _) in removed_batch_state { - if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); - if let Some(funded_chan) = peer_state.channel_by_id - .get_mut(&channel_id) - .and_then(Channel::as_funded_mut) - { - batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding)); - funded_chan.set_batch_ready(); - let mut pending_events = $self.pending_events.lock().unwrap(); - emit_channel_pending_event!(pending_events, funded_chan); - } - } - } - if let Some(tx) = batch_funding_tx { - log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid()); - $self.tx_broadcaster.broadcast_transactions(&[&tx]); - } - } - } - - 
$self.handle_monitor_update_completion_actions(update_actions); - - if let Some(forwards) = htlc_forwards { - $self.forward_htlcs(&mut [forwards][..]); - } - if let Some(decode) = decode_update_add_htlcs { - $self.push_decode_update_add_htlcs(decode); - } - $self.finalize_claims(updates.finalized_claimed_htlcs); - for failure in updates.failed_htlcs.drain(..) { - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; - $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); - } + $self.post_monitor_update_unlock( + channel_id, counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, + htlc_forwards, decode_update_add_htlcs, updates.finalized_claimed_htlcs, + updates.failed_htlcs, + ); } } } @@ -9368,6 +9313,79 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ self.our_network_pubkey } + /// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied + /// which can happen after the per-peer state lock has been dropped. + fn post_monitor_update_unlock( + &self, channel_id: ChannelId, counterparty_node_id: PublicKey, + unbroadcasted_batch_funding_txid: Option, + update_actions: Vec, + htlc_forwards: Option, + decode_update_add_htlcs: Option<(u64, Vec)>, + finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + ) { + // If the channel belongs to a batch funding transaction, the progress of the batch + // should be updated as we have received funding_signed and persisted the monitor. 
+ if let Some(txid) = unbroadcasted_batch_funding_txid { + let mut funding_batch_states = self.funding_batch_states.lock().unwrap(); + let mut batch_completed = false; + if let Some(batch_state) = funding_batch_states.get_mut(&txid) { + let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| ( + *chan_id == channel_id && + *pubkey == counterparty_node_id + )); + if let Some(channel_state) = channel_state { + channel_state.2 = true; + } else { + debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update"); + } + batch_completed = batch_state.iter().all(|(_, _, completed)| *completed); + } else { + debug_assert!(false, "Missing batch state for channel which completed initial monitor update"); + } + + // When all channels in a batched funding transaction have become ready, it is not necessary + // to track the progress of the batch anymore and the state of the channels can be updated. + if batch_completed { + let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut batch_funding_tx = None; + for (channel_id, counterparty_node_id, _) in removed_batch_state { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if let Some(funded_chan) = peer_state.channel_by_id + .get_mut(&channel_id) + .and_then(Channel::as_funded_mut) + { + batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding)); + funded_chan.set_batch_ready(); + let mut pending_events = self.pending_events.lock().unwrap(); + emit_channel_pending_event!(pending_events, funded_chan); + } + } + } + if let Some(tx) = batch_funding_tx { + log_info!(self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid()); + self.tx_broadcaster.broadcast_transactions(&[&tx]); + } + } + } + + 
self.handle_monitor_update_completion_actions(update_actions); + + if let Some(forwards) = htlc_forwards { + self.forward_htlcs(&mut [forwards][..]); + } + if let Some(decode) = decode_update_add_htlcs { + self.push_decode_update_add_htlcs(decode); + } + self.finalize_claims(finalized_claimed_htlcs); + for failure in failed_htlcs { + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); + } + } + #[rustfmt::skip] fn handle_monitor_update_completion_actions>(&self, actions: I) { debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread); From 4e4e34e3e5c78fdba72ad0a591884ead4e2907df Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 17 Nov 2025 15:06:49 +0000 Subject: [PATCH 2/7] Format the code moved in the previous commit --- lightning/src/ln/channelmanager.rs | 101 ++++++++++++++++++----------- 1 file changed, 63 insertions(+), 38 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5b502c4245e..081dda28c07 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3519,22 +3519,27 @@ macro_rules! emit_initial_channel_ready_event { /// Requires that `$chan.blocked_monitor_updates_pending() == 0` and the in-flight monitor update /// set for this channel is empty! macro_rules! 
handle_monitor_update_completion { - ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { - let channel_id = $chan.context.channel_id(); - let outbound_scid_alias = $chan.context().outbound_scid_alias(); - let counterparty_node_id = $chan.context.get_counterparty_node_id(); + ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{ + let chan_id = $chan.context.channel_id(); + let outbound_alias = $chan.context().outbound_scid_alias(); + let cp_node_id = $chan.context.get_counterparty_node_id(); #[cfg(debug_assertions)] { - let in_flight_updates = - $peer_state.in_flight_monitor_updates.get(&channel_id); + let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id); assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); assert_eq!($chan.blocked_monitor_updates_pending(), 0); } let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let updates = $chan.monitor_updating_restored(&&logger, - &$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(), + let updates = $chan.monitor_updating_restored( + &&logger, + &$self.node_signer, + $self.chain_hash, + &*$self.config.read().unwrap(), $self.best_block.read().unwrap().height, - |htlc_id| $self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id)); + |htlc_id| { + $self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id) + }, + ); let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() && $peer_state.is_connected @@ -3545,36 +3550,52 @@ macro_rules! handle_monitor_update_completion { // channels, but there's no reason not to just inform our counterparty of our fees // now. 
if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) { - Some(MessageSendEvent::SendChannelUpdate { - node_id: counterparty_node_id, - msg, - }) - } else { None } - } else { None }; + Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg }) + } else { + None + } + } else { + None + }; - let update_actions = $peer_state.monitor_update_blocked_actions - .remove(&channel_id).unwrap_or(Vec::new()); + let update_actions = + $peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( - &mut $peer_state.pending_msg_events, $chan, updates.raa, - updates.commitment_update, updates.commitment_order, updates.accepted_htlcs, - updates.pending_update_adds, updates.funding_broadcastable, updates.channel_ready, - updates.announcement_sigs, updates.tx_signatures, None, updates.channel_ready_order, + &mut $peer_state.pending_msg_events, + $chan, + updates.raa, + updates.commitment_update, + updates.commitment_order, + updates.accepted_htlcs, + updates.pending_update_adds, + updates.funding_broadcastable, + updates.channel_ready, + updates.announcement_sigs, + updates.tx_signatures, + None, + updates.channel_ready_order, ); if let Some(upd) = channel_update { $peer_state.pending_msg_events.push(upd); } - let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); + let unbroadcasted_batch_funding_txid = + $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); $self.post_monitor_update_unlock( - channel_id, counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions, - htlc_forwards, decode_update_add_htlcs, updates.finalized_claimed_htlcs, + chan_id, + cp_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + updates.finalized_claimed_htlcs, updates.failed_htlcs, 
); - } } + }}; } /// Returns whether the monitor update is completed, `false` if the update is in-progress. @@ -9330,18 +9351,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let mut funding_batch_states = self.funding_batch_states.lock().unwrap(); let mut batch_completed = false; if let Some(batch_state) = funding_batch_states.get_mut(&txid) { - let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| ( - *chan_id == channel_id && - *pubkey == counterparty_node_id - )); + let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| { + *chan_id == channel_id && *pubkey == counterparty_node_id + }); if let Some(channel_state) = channel_state { channel_state.2 = true; } else { - debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update"); + debug_assert!(false, "Missing batch state after initial monitor update"); } batch_completed = batch_state.iter().all(|(_, _, completed)| *completed); } else { - debug_assert!(false, "Missing batch state for channel which completed initial monitor update"); + debug_assert!(false, "Missing batch state after initial monitor update"); } // When all channels in a batched funding transaction have become ready, it is not necessary @@ -9353,19 +9373,21 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for (channel_id, counterparty_node_id, _) in removed_batch_state { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state = peer_state_mutex.lock().unwrap(); - if let Some(funded_chan) = peer_state.channel_by_id - .get_mut(&channel_id) - .and_then(Channel::as_funded_mut) - { - batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding)); + + let chan = peer_state.channel_by_id.get_mut(&channel_id); + if let Some(funded_chan) = chan.and_then(Channel::as_funded_mut) { + batch_funding_tx = batch_funding_tx.or_else(|| { + funded_chan.context.unbroadcasted_funding(&funded_chan.funding) + }); funded_chan.set_batch_ready(); + let mut pending_events = self.pending_events.lock().unwrap(); emit_channel_pending_event!(pending_events, funded_chan); } } } if let Some(tx) = batch_funding_tx { - log_info!(self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid()); + log_info!(self.logger, "Broadcasting batch funding tx {}", tx.compute_txid()); self.tx_broadcaster.broadcast_transactions(&[&tx]); } } @@ -9381,7 +9403,10 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } self.finalize_claims(finalized_claimed_htlcs); for failure in failed_htlcs { - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None); } } From 2a650256337146284f430f61ba6299a774f4b231 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 15 Nov 2025 21:13:03 +0000 Subject: [PATCH 3/7] Move `ChannelError`-handling logic down in `channelmanager.rs` In the next commit we'll functionize some of the `ChannelError`-handling logic, requiring it have access to `handle_new_monitor_update_locked_actions_handled_by_caller!` which thus needs to be defined above it. --- lightning/src/ln/channelmanager.rs | 340 ++++++++++++++--------------- 1 file changed, 170 insertions(+), 170 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 081dda28c07..51b2f5747c5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3255,176 +3255,6 @@ macro_rules! handle_error { } }; } -/// Do not call this directly, use `convert_channel_err` instead. -#[rustfmt::skip] -macro_rules! locked_close_channel { - ($self: ident, $chan_context: expr, UNFUNDED) => {{ - $self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. 
- let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias()); - debug_assert!(alias_removed); - }}; - ($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ - if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - $self, funding_txo, update, $peer_state, $funded_chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). - let update_id = $funded_chan.context.get_latest_monitor_update_id(); - if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 { - let chan_id = $funded_chan.context.channel_id(); - $peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $funded_chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. 
- let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias()); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias()); - for scid in $funded_chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - }} -} - -/// When a channel is removed, two things need to happen: -/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, -/// (b) [`handle_error`] needs to be called without holding any locks (except -/// [`ChannelManager::total_consistency_lock`]), which then calls -/// [`ChannelManager::finish_close_channel`]. -/// -/// Note that this step can be skipped if the channel was never opened (through the creation of a -/// [`ChannelMonitor`]/channel funding transaction) to begin with. -/// -/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped -/// error)`, except in the `COOP_CLOSE` case, where the bool is elided (it is always implicitly -/// true). -#[rustfmt::skip] -macro_rules! 
convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { { - match $err { - ChannelError::Warn(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id)) - }, - ChannelError::WarnAndDisconnect(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), $channel_id)) - }, - ChannelError::Ignore(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id)) - }, - ChannelError::Abort(reason) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id)) - }, - ChannelError::Close((msg, reason)) => { - let (mut shutdown_res, chan_update) = $close(reason); - let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - $locked_close(&mut shutdown_res, $chan); - let err = - MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update); - (true, err) - }, - ChannelError::SendError(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), $channel_id)) - }, - } - } }; - ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { - let chan_id = $funded_channel.context.channel_id(); - let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); - let do_close = |_| { - ( - $shutdown_result, - $self.get_channel_update_for_broadcast(&$funded_channel).ok(), - ) - }; - let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { - locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); - }; - let (close, mut err) = - convert_channel_err!($self, $peer_state, reason, $funded_channel, do_close, locked_close, chan_id, _internal); - 
err.dont_send_error_message(); - debug_assert!(close); - err - } }; - ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { - let chan_id = $funded_channel.context.channel_id(); - let mut do_close = |reason| { - ( - $funded_channel.force_shutdown(reason), - $self.get_channel_update_for_broadcast(&$funded_channel).ok(), - ) - }; - let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { - locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); - }; - convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, chan_id, _internal) - } }; - ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - let chan_id = $channel.context().channel_id(); - let mut do_close = |reason| { ($channel.force_shutdown(reason), None) }; - let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); }; - convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, chan_id, _internal) - } }; - ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { - match $channel.as_funded_mut() { - Some(funded_channel) => { - convert_channel_err!($self, $peer_state, $err, funded_channel, FUNDED_CHANNEL) - }, - None => { - convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL) - }, - } - }; -} - -macro_rules! break_channel_entry { - ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { - match $res { - Ok(res) => res, - Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); - if drop { - $entry.remove_entry(); - } - break Err(res); - }, - } - }; -} - -macro_rules! 
try_channel_entry { - ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { - match $res { - Ok(res) => res, - Err(e) => { - let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); - if drop { - $entry.remove_entry(); - } - return Err(res); - }, - } - }; -} - macro_rules! send_channel_ready { ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{ if $channel.context.is_connected() { @@ -3778,6 +3608,176 @@ macro_rules! handle_new_monitor_update { }}; } +/// Do not call this directly, use `convert_channel_err` instead. +#[rustfmt::skip] +macro_rules! locked_close_channel { + ($self: ident, $chan_context: expr, UNFUNDED) => {{ + $self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias()); + debug_assert!(alias_removed); + }}; + ($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ + if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + $self, funding_txo, update, $peer_state, $funded_chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. 
However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). + let update_id = $funded_chan.context.get_latest_monitor_update_id(); + if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 { + let chan_id = $funded_chan.context.channel_id(); + $peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); + if let Some(short_id) = $funded_chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias()); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias()); + for scid in $funded_chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + }} +} + +/// When a channel is removed, two things need to happen: +/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, +/// (b) [`handle_error`] needs to be called without holding any locks (except +/// [`ChannelManager::total_consistency_lock`]), which then calls +/// [`ChannelManager::finish_close_channel`]. 
+/// +/// Note that this step can be skipped if the channel was never opened (through the creation of a +/// [`ChannelMonitor`]/channel funding transaction) to begin with. +/// +/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped +/// error)`, except in the `COOP_CLOSE` case, where the bool is elided (it is always implicitly +/// true). +#[rustfmt::skip] +macro_rules! convert_channel_err { + ($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { { + match $err { + ChannelError::Warn(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id)) + }, + ChannelError::WarnAndDisconnect(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), $channel_id)) + }, + ChannelError::Ignore(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id)) + }, + ChannelError::Abort(reason) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id)) + }, + ChannelError::Close((msg, reason)) => { + let (mut shutdown_res, chan_update) = $close(reason); + let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + $locked_close(&mut shutdown_res, $chan); + let err = + MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update); + (true, err) + }, + ChannelError::SendError(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), $channel_id)) + }, + } + } }; + ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { + let chan_id = $funded_channel.context.channel_id(); + let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); + let do_close = |_| { + ( + 
$shutdown_result, + $self.get_channel_update_for_broadcast(&$funded_channel).ok(), + ) + }; + let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { + locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); + }; + let (close, mut err) = + convert_channel_err!($self, $peer_state, reason, $funded_channel, do_close, locked_close, chan_id, _internal); + err.dont_send_error_message(); + debug_assert!(close); + err + } }; + ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { + let chan_id = $funded_channel.context.channel_id(); + let mut do_close = |reason| { + ( + $funded_channel.force_shutdown(reason), + $self.get_channel_update_for_broadcast(&$funded_channel).ok(), + ) + }; + let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { + locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); + }; + convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, chan_id, _internal) + } }; + ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { + let chan_id = $channel.context().channel_id(); + let mut do_close = |reason| { ($channel.force_shutdown(reason), None) }; + let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); }; + convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, chan_id, _internal) + } }; + ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { + match $channel.as_funded_mut() { + Some(funded_channel) => { + convert_channel_err!($self, $peer_state, $err, funded_channel, FUNDED_CHANNEL) + }, + None => { + convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL) + }, + } + }; +} + +macro_rules! 
break_channel_entry { + ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(e) => { + let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + if drop { + $entry.remove_entry(); + } + break Err(res); + }, + } + }; +} + +macro_rules! try_channel_entry { + ($self: ident, $peer_state: expr, $res: expr, $entry: expr) => { + match $res { + Ok(res) => res, + Err(e) => { + let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut()); + if drop { + $entry.remove_entry(); + } + return Err(res); + }, + } + }; +} + #[rustfmt::skip] macro_rules! process_events_body { ($self: expr, $event_to_handle: expr, $handle_event: expr) => { From 252a75a55357c0a1405e1596a229ecb7c50b8446 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 15 Nov 2025 21:29:58 +0000 Subject: [PATCH 4/7] Functionize the FUNDED cases in `convert_channel_err` `convert_channel_err` is used extensively in `channelmanager.rs` (often indirectly via `try_channel_entry`) and generates nontrivial code (especially once you include `locked_close_channel`). Here we take the `FUNDED` cases of it and move them to an internal function reducing the generated code size. On the same machine as described two commits ago, this further reduces build times by about another second. --- lightning/src/ln/channelmanager.rs | 104 ++++++++++++++++++++--------- 1 file changed, 73 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 51b2f5747c5..000e081b092 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3566,11 +3566,11 @@ macro_rules! handle_post_close_monitor_update { /// later time. macro_rules! 
handle_new_monitor_update_locked_actions_handled_by_caller { ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr + $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr ) => {{ let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( $self, - &mut $peer_state.in_flight_monitor_updates, + $in_flight_monitor_updates, $chan_context.channel_id(), $funding_txo, $chan_context.get_counterparty_node_id(), @@ -3622,10 +3622,10 @@ macro_rules! locked_close_channel { let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias()); debug_assert!(alias_removed); }}; - ($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ + ($self: ident, $closed_channel_monitor_update_ids: expr, $in_flight_monitor_updates: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { handle_new_monitor_update_locked_actions_handled_by_caller!( - $self, funding_txo, update, $peer_state, $funded_chan.context + $self, funding_txo, update, $in_flight_monitor_updates, $funded_chan.context ); } // If there's a possibility that we need to generate further monitor updates for this @@ -3635,7 +3635,7 @@ macro_rules! 
locked_close_channel { let update_id = $funded_chan.context.get_latest_monitor_update_id(); if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 { let chan_id = $funded_chan.context.channel_id(); - $peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id); + $closed_channel_monitor_update_ids.insert(chan_id, update_id); } let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); if let Some(short_id) = $funded_chan.funding.get_short_channel_id() { @@ -3657,6 +3657,60 @@ macro_rules! locked_close_channel { }} } +fn convert_channel_err_internal< + Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>), +>( + err: ChannelError, chan_id: ChannelId, close: Close, +) -> (bool, MsgHandleErrInternal) { + match err { + ChannelError::Warn(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), chan_id)) + }, + ChannelError::WarnAndDisconnect(msg) => ( + false, + MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), chan_id), + ), + ChannelError::Ignore(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), chan_id)) + }, + ChannelError::Abort(reason) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), chan_id)) + }, + ChannelError::Close((msg, reason)) => { + let (finish, chan_update) = close(reason, &msg); + (true, MsgHandleErrInternal::from_finish_shutdown(msg, chan_id, finish, chan_update)) + }, + ChannelError::SendError(msg) => { + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), chan_id)) + }, + } +} + +fn convert_funded_channel_err_internal>( + cm: &CM, closed_update_ids: &mut BTreeMap, + in_flight_updates: &mut BTreeMap)>, + coop_close_shutdown_res: Option, err: ChannelError, + chan: &mut FundedChannel, +) -> (bool, MsgHandleErrInternal) 
+where + SP::Target: SignerProvider, + CM::Watch: Watch<::EcdsaSigner>, +{ + let chan_id = chan.context.channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let cm = cm.get_cm(); + let logger = WithChannelContext::from(&cm.logger, &chan.context, None); + + let mut shutdown_res = + if let Some(res) = coop_close_shutdown_res { res } else { chan.force_shutdown(reason) }; + let chan_update = cm.get_channel_update_for_broadcast(chan).ok(); + + log_error!(logger, "Closed channel due to close-required error: {}", msg); + locked_close_channel!(cm, closed_update_ids, in_flight_updates, chan, shutdown_res, FUNDED); + (shutdown_res, chan_update) + }) +} + /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, /// (b) [`handle_error`] needs to be called without holding any locks (except @@ -3700,35 +3754,19 @@ macro_rules! convert_channel_err { } } }; ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { - let chan_id = $funded_channel.context.channel_id(); let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); - let do_close = |_| { - ( - $shutdown_result, - $self.get_channel_update_for_broadcast(&$funded_channel).ok(), - ) - }; - let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { - locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); - }; + let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; + let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; let (close, mut err) = - convert_channel_err!($self, $peer_state, reason, $funded_channel, do_close, locked_close, chan_id, _internal); + convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, Some($shutdown_result), reason, $funded_channel); err.dont_send_error_message(); 
debug_assert!(close); err } }; ($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { { - let chan_id = $funded_channel.context.channel_id(); - let mut do_close = |reason| { - ( - $funded_channel.force_shutdown(reason), - $self.get_channel_update_for_broadcast(&$funded_channel).ok(), - ) - }; - let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| { - locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED); - }; - convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, chan_id, _internal) + let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; + let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; + convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, $funded_channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { let chan_id = $channel.context().channel_id(); @@ -3739,7 +3777,9 @@ macro_rules! 
convert_channel_err { ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { Some(funded_channel) => { - convert_channel_err!($self, $peer_state, $err, funded_channel, FUNDED_CHANNEL) + let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; + let in_flight_updates = &mut $peer_state.in_flight_monitor_updates; + convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, funded_channel) }, None => { convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL) @@ -4506,7 +4546,8 @@ where let mut has_uncompleted_channel = None; for (channel_id, counterparty_node_id, state) in affected_channels { if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { - let mut peer_state = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state = &mut *peer_state_lock; if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { let reason = ClosureReason::FundingBatchClosure; let err = ChannelError::Close((reason.to_string(), reason)); @@ -6350,9 +6391,10 @@ where per_peer_state.get(&counterparty_node_id) .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state))) - .map(|(mut chan, mut peer_state)| { + .map(|(mut chan, mut peer_state_lock)| { let reason = ClosureReason::ProcessingError { err: e.clone() }; let err = ChannelError::Close((e.clone(), reason)); + let peer_state = &mut *peer_state_lock; let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan); shutdown_results.push((Err(e), counterparty_node_id)); @@ -14385,7 +14427,7 @@ where self, funding_txo, monitor_update, - peer_state, + &mut peer_state.in_flight_monitor_updates, funded_channel.context ); to_process_monitor_update_actions.push(( From 0b0b56ba29add4feba644524ff97e84a36806c44 Mon Sep 17 00:00:00 2001 From: Matt 
Corallo  Date: Sat, 15 Nov 2025 21:31:35 +0000 Subject: [PATCH 5/7] Functionize the UNFUNDED case in `convert_channel_err` `convert_channel_err` is used extensively in `channelmanager.rs` (often indirectly via `try_channel_entry`) and generates nontrivial code (especially once you include `locked_close_channel`). Here we take the `UNFUNDED` case of it and move it to an internal function reducing the generated code size. On the same machine as described three commits ago, this further reduces build times from the previous commit by about another second. --- lightning/src/ln/channelmanager.rs | 53 +++++++++++------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 000e081b092..ec22d1c24a9 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3711,6 +3711,24 @@ where }) } +fn convert_unfunded_channel_err_internal( + cm: &CM, err: ChannelError, chan: &mut Channel, +) -> (bool, MsgHandleErrInternal) +where + SP::Target: SignerProvider, +{ + let chan_id = chan.context().channel_id(); + convert_channel_err_internal(err, chan_id, |reason, msg| { + let cm = cm.get_cm(); + let logger = WithChannelContext::from(&cm.logger, chan.context(), None); + + let shutdown_res = chan.force_shutdown(reason); + log_error!(logger, "Closed channel due to close-required error: {}", msg); + locked_close_channel!(cm, chan.context(), UNFUNDED); + (shutdown_res, None) + }) +} + /// When a channel is removed, two things need to happen: /// (a) This must be called in the same `per_peer_state` lock as the channel-closing action, /// (b) [`handle_error`] needs to be called without holding any locks (except /// @@ -3725,34 +3743,6 @@ where /// true). #[rustfmt::skip] macro_rules! 
convert_channel_err { - ($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { { - match $err { - ChannelError::Warn(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id)) - }, - ChannelError::WarnAndDisconnect(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), $channel_id)) - }, - ChannelError::Ignore(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id)) - }, - ChannelError::Abort(reason) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id)) - }, - ChannelError::Close((msg, reason)) => { - let (mut shutdown_res, chan_update) = $close(reason); - let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None); - log_error!(logger, "Closed channel due to close-required error: {}", msg); - $locked_close(&mut shutdown_res, $chan); - let err = - MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update); - (true, err) - }, - ChannelError::SendError(msg) => { - (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), $channel_id)) - }, - } - } }; ($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { { let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone())); let closed_update_ids = &mut $peer_state.closed_channel_monitor_update_ids; @@ -3769,10 +3759,7 @@ macro_rules! 
convert_channel_err { convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, $funded_channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { { - let chan_id = $channel.context().channel_id(); - let mut do_close = |reason| { ($channel.force_shutdown(reason), None) }; - let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); }; - convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, chan_id, _internal) + convert_unfunded_channel_err_internal($self, $err, $channel) } }; ($self: ident, $peer_state: expr, $err: expr, $channel: expr) => { match $channel.as_funded_mut() { @@ -3782,7 +3769,7 @@ macro_rules! convert_channel_err { convert_funded_channel_err_internal($self, closed_update_ids, in_flight_updates, None, $err, funded_channel) }, None => { - convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL) + convert_unfunded_channel_err_internal($self, $err, $channel) }, } }; From b6974a020611653c5d26c0ec217ea91bbc0552df Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 15 Nov 2025 21:58:48 +0000 Subject: [PATCH 6/7] Inline `locked_close_channel` in the new `convert_*_channel_err` Now that `convert_channel_err` is primarily in two functions, `locked_close_channel` is just a dumb macro that has two forms, each only called once. Instead, drop it and inline its contents into `convert_*_channel_err`. --- lightning/src/ln/channelmanager.rs | 107 +++++++++++++---------------- 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ec22d1c24a9..ff3aed5eba2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3608,55 +3608,6 @@ macro_rules! handle_new_monitor_update { }}; } -/// Do not call this directly, use `convert_channel_err` instead. -#[rustfmt::skip] -macro_rules! 
locked_close_channel { - ($self: ident, $chan_context: expr, UNFUNDED) => {{ - $self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias()); - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias()); - debug_assert!(alias_removed); - }}; - ($self: ident, $closed_channel_monitor_update_ids: expr, $in_flight_monitor_updates: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{ - if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - $self, funding_txo, update, $in_flight_monitor_updates, $funded_chan.context - ); - } - // If there's a possibility that we need to generate further monitor updates for this - // channel, we need to store the last update_id of it. However, we don't want to insert - // into the map (which prevents the `PeerState` from being cleaned up) for channels that - // never even got confirmations (which would open us up to DoS attacks). 
- let update_id = $funded_chan.context.get_latest_monitor_update_id(); - if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 { - let chan_id = $funded_chan.context.channel_id(); - $closed_channel_monitor_update_ids.insert(chan_id, update_id); - } - let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap(); - if let Some(short_id) = $funded_chan.funding.get_short_channel_id() { - short_to_chan_info.remove(&short_id); - } else { - // If the channel was never confirmed on-chain prior to its closure, remove the - // outbound SCID alias we used for it from the collision-prevention set. While we - // generally want to avoid ever re-using an outbound SCID alias across all channels, we - // also don't want a counterparty to be able to trivially cause a memory leak by simply - // opening a million channels with us which are closed before we ever reach the funding - // stage. - let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias()); - debug_assert!(alias_removed); - } - short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias()); - for scid in $funded_chan.context.historical_scids() { - short_to_chan_info.remove(scid); - } - }} -} - fn convert_channel_err_internal< Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>), >( @@ -3687,8 +3638,8 @@ fn convert_channel_err_internal< } fn convert_funded_channel_err_internal>( - cm: &CM, closed_update_ids: &mut BTreeMap, - in_flight_updates: &mut BTreeMap)>, + cm: &CM, closed_channel_monitor_update_ids: &mut BTreeMap, + in_flight_monitor_updates: &mut BTreeMap)>, coop_close_shutdown_res: Option, err: ChannelError, chan: &mut FundedChannel, ) -> (bool, MsgHandleErrInternal) @@ -3706,7 +3657,38 @@ where let chan_update = cm.get_channel_update_for_broadcast(chan).ok(); log_error!(logger, "Closed 
channel due to close-required error: {}", msg); - locked_close_channel!(cm, closed_update_ids, in_flight_updates, chan, shutdown_res, FUNDED); + + if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { + handle_new_monitor_update_locked_actions_handled_by_caller!( + cm, funding_txo, update, in_flight_monitor_updates, chan.context + ); + } + // If there's a possibility that we need to generate further monitor updates for this + // channel, we need to store the last update_id of it. However, we don't want to insert + // into the map (which prevents the `PeerState` from being cleaned up) for channels that + // never even got confirmations (which would open us up to DoS attacks). + let update_id = chan.context.get_latest_monitor_update_id(); + if chan.funding.get_funding_tx_confirmation_height().is_some() || chan.context.minimum_depth(&chan.funding) == Some(0) || update_id > 1 { + closed_channel_monitor_update_ids.insert(chan_id, update_id); + } + let mut short_to_chan_info = cm.short_to_chan_info.write().unwrap(); + if let Some(short_id) = chan.funding.get_short_channel_id() { + short_to_chan_info.remove(&short_id); + } else { + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. 
+ let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&chan.context.outbound_scid_alias()); + debug_assert!(alias_removed); + } + short_to_chan_info.remove(&chan.context.outbound_scid_alias()); + for scid in chan.context.historical_scids() { + short_to_chan_info.remove(scid); + } + (shutdown_res, chan_update) }) } @@ -3724,7 +3706,15 @@ where let shutdown_res = chan.force_shutdown(reason); log_error!(logger, "Closed channel due to close-required error: {}", msg); - locked_close_channel!(cm, chan.context(), UNFUNDED); + cm.short_to_chan_info.write().unwrap().remove(&chan.context().outbound_scid_alias()); + // If the channel was never confirmed on-chain prior to its closure, remove the + // outbound SCID alias we used for it from the collision-prevention set. While we + // generally want to avoid ever re-using an outbound SCID alias across all channels, we + // also don't want a counterparty to be able to trivially cause a memory leak by simply + // opening a million channels with us which are closed before we ever reach the funding + // stage. + let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&chan.context().outbound_scid_alias()); + debug_assert!(alias_removed); (shutdown_res, None) }) } @@ -4512,7 +4502,7 @@ where self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { - debug_assert!(false, "This should have been handled in `locked_close_channel`"); + debug_assert!(false, "This should have been handled in `convert_channel_err`"); self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update); } if self.background_events_processed_since_startup.load(Ordering::Acquire) { @@ -4520,7 +4510,7 @@ where // not in the startup sequence) check if we need to handle any // `MonitorUpdateCompletionAction`s. 
// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in - // `locked_close_channel` we can skip the locks here. + // `convert_channel_err` we can skip the locks here. if shutdown_res.channel_funding_txo.is_some() { self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id); } @@ -10357,10 +10347,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let funded_channel_id = chan.context.channel_id(); macro_rules! fail_chan { ($err: expr) => { { - // Note that at this point we've filled in the funding outpoint on our - // channel, but its actually in conflict with another channel. Thus, if - // we call `convert_channel_err` immediately (thus calling - // `locked_close_channel`), we'll remove the existing channel from `outpoint_to_peer`. + // Note that at this point we've filled in the funding outpoint on our channel, but its + // actually in conflict with another channel. Thus, if we call `convert_channel_err` + // immediately, we'll remove the existing channel from `outpoint_to_peer`. // Thus, we must first unset the funding outpoint on the channel. 
let err = ChannelError::close($err.to_owned()); chan.unset_funding_info(); From 7cea2e930197b6d31d2ca12b74c96337f9800127 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 17 Nov 2025 14:52:10 +0000 Subject: [PATCH 7/7] Format `convert_*_channel_err` code moved out of macro --- lightning/src/ln/channelmanager.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ff3aed5eba2..8196d886e65 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3660,7 +3660,11 @@ where if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { handle_new_monitor_update_locked_actions_handled_by_caller!( - cm, funding_txo, update, in_flight_monitor_updates, chan.context + cm, + funding_txo, + update, + in_flight_monitor_updates, + chan.context ); } // If there's a possibility that we need to generate further monitor updates for this @@ -3668,7 +3672,9 @@ where // into the map (which prevents the `PeerState` from being cleaned up) for channels that // never even got confirmations (which would open us up to DoS attacks). let update_id = chan.context.get_latest_monitor_update_id(); - if chan.funding.get_funding_tx_confirmation_height().is_some() || chan.context.minimum_depth(&chan.funding) == Some(0) || update_id > 1 { + let funding_confirmed = chan.funding.get_funding_tx_confirmation_height().is_some(); + let chan_zero_conf = chan.context.minimum_depth(&chan.funding) == Some(0); + if funding_confirmed || chan_zero_conf || update_id > 1 { closed_channel_monitor_update_ids.insert(chan_id, update_id); } let mut short_to_chan_info = cm.short_to_chan_info.write().unwrap(); @@ -3681,7 +3687,8 @@ where // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. 
- let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&chan.context.outbound_scid_alias()); + let outbound_alias = chan.context.outbound_scid_alias(); + let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); debug_assert!(alias_removed); } short_to_chan_info.remove(&chan.context.outbound_scid_alias()); @@ -3713,7 +3720,8 @@ where // also don't want a counterparty to be able to trivially cause a memory leak by simply // opening a million channels with us which are closed before we ever reach the funding // stage. - let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&chan.context().outbound_scid_alias()); + let outbound_alias = chan.context().outbound_scid_alias(); + let alias_removed = cm.outbound_scid_aliases.lock().unwrap().remove(&outbound_alias); debug_assert!(alias_removed); (shutdown_res, None) })