Commit 9ed5ae7

Move unlocked part of handle_monitor_update_completion into an fn
The `handle_monitor_update_completion` macro is called from a number of places in `channelmanager.rs` and expands to quite a bit of code at each call site. Here we take the unlocked half of `handle_monitor_update_completion` and move it into a function on `ChannelManager`, so that code is compiled once rather than once per expansion.

As a result, the time to build the `lightning` crate tests after a single-line `println` change in `channelmanager.rs` dropped by around a full second (out of ~28.5 originally) on my machine with rustc 1.85.0. Memory usage of the `expand_crate` step went from +554MB to +548MB in the same test.
1 parent 6d0254c commit 9ed5ae7
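
To illustrate the general pattern at work here: a `macro_rules!` body is re-expanded and re-compiled at every call site, so hoisting its bulk into a plain method means the compiler processes that code once, while the macro keeps only the part that genuinely needs call-site locals. The following is a minimal, hypothetical sketch of that split, not LDK code; `Manager`, `post_update_unlock`, `handle_completion!`, and `guard` are stand-in names.

// Hypothetical sketch of the macro-to-fn split (not LDK APIs).
struct Manager {
    log: Vec<String>,
}

impl Manager {
    // The "unlocked half": compiled once here, instead of being expanded
    // verbatim into every call site of the macro below.
    fn post_update_unlock(&mut self, id: u64) {
        self.log.push(format!("completed update for channel {}", id));
        // ...imagine many more lines that previously lived in the macro body...
    }
}

// The macro keeps only the part that needs call-site locals (the lock
// guard that must be dropped first), then delegates to the method.
macro_rules! handle_completion {
    ($self:expr, $lock_guard:expr, $id:expr) => {{
        core::mem::drop($lock_guard); // release the lock before the slow work
        $self.post_update_unlock($id);
    }};
}

fn main() {
    let mut mgr = Manager { log: Vec::new() };
    let guard = String::from("stand-in for a MutexGuard");
    handle_completion!(mgr, guard, 42);
    assert_eq!(mgr.log.len(), 1);
}

This mirrors the split the commit makes: dropping the peer-state locks stays in the macro, and everything after it moves behind the `post_monitor_update_unlock` call.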

1 file changed: lightning/src/ln/channelmanager.rs (79 additions, 61 deletions)
@@ -3531,7 +3531,7 @@ macro_rules! handle_monitor_update_completion {
 			assert_eq!($chan.blocked_monitor_updates_pending(), 0);
 		}
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let mut updates = $chan.monitor_updating_restored(&&logger,
+		let updates = $chan.monitor_updating_restored(&&logger,
 			&$self.node_signer, $self.chain_hash, &*$self.config.read().unwrap(),
 			$self.best_block.read().unwrap().height,
 			|htlc_id| $self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id));
@@ -3569,66 +3569,11 @@
 		core::mem::drop($peer_state_lock);
 		core::mem::drop($per_peer_state_lock);
 
-		// If the channel belongs to a batch funding transaction, the progress of the batch
-		// should be updated as we have received funding_signed and persisted the monitor.
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
-			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
-			let mut batch_completed = false;
-			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
-				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
-					*chan_id == channel_id &&
-					*pubkey == counterparty_node_id
-				));
-				if let Some(channel_state) = channel_state {
-					channel_state.2 = true;
-				} else {
-					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
-				}
-				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
-			} else {
-				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
-			}
-
-			// When all channels in a batched funding transaction have become ready, it is not necessary
-			// to track the progress of the batch anymore and the state of the channels can be updated.
-			if batch_completed {
-				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
-				let per_peer_state = $self.per_peer_state.read().unwrap();
-				let mut batch_funding_tx = None;
-				for (channel_id, counterparty_node_id, _) in removed_batch_state {
-					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-						let mut peer_state = peer_state_mutex.lock().unwrap();
-						if let Some(funded_chan) = peer_state.channel_by_id
-							.get_mut(&channel_id)
-							.and_then(Channel::as_funded_mut)
-						{
-							batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding));
-							funded_chan.set_batch_ready();
-							let mut pending_events = $self.pending_events.lock().unwrap();
-							emit_channel_pending_event!(pending_events, funded_chan);
-						}
-					}
-				}
-				if let Some(tx) = batch_funding_tx {
-					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
-					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
-				}
-			}
-		}
-
-		$self.handle_monitor_update_completion_actions(update_actions);
-
-		if let Some(forwards) = htlc_forwards {
-			$self.forward_htlcs(&mut [forwards][..]);
-		}
-		if let Some(decode) = decode_update_add_htlcs {
-			$self.push_decode_update_add_htlcs(decode);
-		}
-		$self.finalize_claims(updates.finalized_claimed_htlcs);
-		for failure in updates.failed_htlcs.drain(..) {
-			let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
-			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
-		}
+		$self.post_monitor_update_unlock(
+			channel_id, counterparty_node_id, unbroadcasted_batch_funding_txid, update_actions,
+			htlc_forwards, decode_update_add_htlcs, updates.finalized_claimed_htlcs,
+			updates.failed_htlcs,
+		);
 	} }
 }
 
@@ -9365,6 +9310,79 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		self.our_network_pubkey
 	}
 
+	/// Handles actions which need to complete after a [`ChannelMonitorUpdate`] has been applied
+	/// which can happen after the per-peer state lock has been dropped.
+	fn post_monitor_update_unlock(
+		&self, channel_id: ChannelId, counterparty_node_id: PublicKey,
+		unbroadcasted_batch_funding_txid: Option<Txid>,
+		update_actions: Vec<MonitorUpdateCompletionAction>,
+		htlc_forwards: Option<PerSourcePendingForward>,
+		decode_update_add_htlcs: Option<(u64, Vec<msgs::UpdateAddHTLC>)>,
+		finalized_claimed_htlcs: Vec<(HTLCSource, Option<AttributionData>)>,
+		failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+	) {
+		// If the channel belongs to a batch funding transaction, the progress of the batch
+		// should be updated as we have received funding_signed and persisted the monitor.
+		if let Some(txid) = unbroadcasted_batch_funding_txid {
+			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
+			let mut batch_completed = false;
+			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
+				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
+					*chan_id == channel_id &&
+					*pubkey == counterparty_node_id
+				));
+				if let Some(channel_state) = channel_state {
+					channel_state.2 = true;
+				} else {
+					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
+				}
+				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+			} else {
+				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
+			}
+
+			// When all channels in a batched funding transaction have become ready, it is not necessary
+			// to track the progress of the batch anymore and the state of the channels can be updated.
+			if batch_completed {
+				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
+				let per_peer_state = self.per_peer_state.read().unwrap();
+				let mut batch_funding_tx = None;
+				for (channel_id, counterparty_node_id, _) in removed_batch_state {
+					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+						let mut peer_state = peer_state_mutex.lock().unwrap();
+						if let Some(funded_chan) = peer_state.channel_by_id
+							.get_mut(&channel_id)
+							.and_then(Channel::as_funded_mut)
+						{
+							batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding));
+							funded_chan.set_batch_ready();
+							let mut pending_events = self.pending_events.lock().unwrap();
+							emit_channel_pending_event!(pending_events, funded_chan);
+						}
+					}
+				}
+				if let Some(tx) = batch_funding_tx {
+					log_info!(self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
+					self.tx_broadcaster.broadcast_transactions(&[&tx]);
+				}
+			}
+		}
+
+		self.handle_monitor_update_completion_actions(update_actions);
+
+		if let Some(forwards) = htlc_forwards {
+			self.forward_htlcs(&mut [forwards][..]);
+		}
+		if let Some(decode) = decode_update_add_htlcs {
+			self.push_decode_update_add_htlcs(decode);
+		}
+		self.finalize_claims(finalized_claimed_htlcs);
+		for failure in failed_htlcs {
+			let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
+			self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
+		}
+	}
+
 	#[rustfmt::skip]
 	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
 		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
