@@ -9828,30 +9828,35 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	}
 
 	/// TODO(dual_funding): Allow contributions, pass intended amount and inputs
-	#[rustfmt::skip]
 	fn do_accept_inbound_channel(
-		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
-		user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>
+		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
+		accept_0conf: bool, user_channel_id: u128,
+		config_overrides: Option<ChannelConfigOverrides>,
 	) -> Result<(), APIError> {
-
 		let mut config = self.config.read().unwrap().clone();
 
 		// Apply configuration overrides.
 		if let Some(overrides) = config_overrides {
 			config.apply(&overrides);
 		};
 
-		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
+		let logger = WithContext::from(
+			&self.logger,
+			Some(*counterparty_node_id),
+			Some(*temporary_channel_id),
+			None,
+		);
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let peers_without_funded_channels =
-			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 } );
+			self.peers_without_funded_channels(|peer| peer.total_channel_count() > 0);
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| {
+		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
 			log_error!(logger, "Can't find peer matching the passed counterparty node_id");
 
-			let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}");
+			let err_str = format!(
+				"Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"
+			);
 			APIError::ChannelUnavailable { err: err_str }
 		})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -9866,50 +9871,71 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			Some(unaccepted_channel) => {
 				let best_block_height = self.best_block.read().unwrap().height;
 				match unaccepted_channel.open_channel_msg {
-					OpenChannelMessage::V1(open_channel_msg) => {
-						InboundV1Channel::new(
-							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
-							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
-							user_channel_id, &config, best_block_height, &self.logger, accept_0conf
-						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
-						).map(|mut channel| {
-							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
-							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
+					OpenChannelMessage::V1(open_channel_msg) => InboundV1Channel::new(
+						&self.fee_estimator,
+						&self.entropy_source,
+						&self.signer_provider,
+						*counterparty_node_id,
+						&self.channel_type_features(),
+						&peer_state.latest_features,
+						&open_channel_msg,
+						user_channel_id,
+						&config,
+						best_block_height,
+						&self.logger,
+						accept_0conf,
+					)
+					.map_err(|err| {
+						MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
+					})
+					.map(|mut channel| {
+						let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+						let message_send_event =
+							channel.accept_inbound_channel(&&logger).map(|msg| {
 								MessageSendEvent::SendAcceptChannel {
 									node_id: *counterparty_node_id,
 									msg,
 								}
 							});
-							(*temporary_channel_id, Channel::from(channel), message_send_event)
-						})
-					},
-					OpenChannelMessage::V2(open_channel_msg) => {
-						PendingV2Channel::new_inbound(
-							&self.fee_estimator, &self.entropy_source, &self.signer_provider,
-							self.get_our_node_id(), *counterparty_node_id,
-							&self.channel_type_features(), &peer_state.latest_features,
-							&open_channel_msg,
-							user_channel_id, &config, best_block_height,
-							&self.logger,
-						).map_err(|e| {
-							let channel_id = open_channel_msg.common_fields.temporary_channel_id;
-							MsgHandleErrInternal::from_chan_no_close(e, channel_id)
-						}).map(|channel| {
-							let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
-								node_id: channel.context.get_counterparty_node_id(),
-								msg: channel.accept_inbound_dual_funded_channel()
-							};
-							(channel.context.channel_id(), Channel::from(channel), Some(message_send_event))
-						})
-					},
+						(*temporary_channel_id, Channel::from(channel), message_send_event)
+					}),
+					OpenChannelMessage::V2(open_channel_msg) => PendingV2Channel::new_inbound(
+						&self.fee_estimator,
+						&self.entropy_source,
+						&self.signer_provider,
+						self.get_our_node_id(),
+						*counterparty_node_id,
+						&self.channel_type_features(),
+						&peer_state.latest_features,
+						&open_channel_msg,
+						user_channel_id,
+						&config,
+						best_block_height,
+						&self.logger,
+					)
+					.map_err(|e| {
+						let channel_id = open_channel_msg.common_fields.temporary_channel_id;
+						MsgHandleErrInternal::from_chan_no_close(e, channel_id)
+					})
+					.map(|channel| {
+						let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
+							node_id: channel.context.get_counterparty_node_id(),
+							msg: channel.accept_inbound_dual_funded_channel(),
+						};
+						(
+							channel.context.channel_id(),
+							Channel::from(channel),
+							Some(message_send_event),
+						)
+					}),
 				}
 			},
 			None => {
 				let err_str = "No such channel awaiting to be accepted.".to_owned();
 				log_error!(logger, "{}", err_str);
 
 				return Err(APIError::APIMisuseError { err: err_str });
-			}
+			},
 		};
 
 		// We have to match below instead of map_err on the above as in the map_err closure the borrow checker
@@ -9920,13 +9946,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				mem::drop(peer_state_lock);
 				mem::drop(per_peer_state);
 				// TODO(dunxen): Find/make less icky way to do this.
-				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
-					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+				match handle_error!(
+					self,
+					Result::<(), MsgHandleErrInternal>::Err(err),
+					*counterparty_node_id
+				) {
+					Ok(_) => {
+						unreachable!("`handle_error` only returns Err as we've passed in an Err")
+					},
 					Err(e) => {
 						return Err(APIError::ChannelUnavailable { err: e.err });
 					},
 				}
-			}
+			},
 		};
 
 		if accept_0conf {
@@ -9935,9 +9967,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		} else if channel.funding().get_channel_type().requires_zero_conf() {
 			let send_msg_err_event = MessageSendEvent::HandleError {
 				node_id: channel.context().get_counterparty_node_id(),
-				action: msgs::ErrorAction::SendErrorMessage{
-					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
-				}
+				action: msgs::ErrorAction::SendErrorMessage {
+					msg: msgs::ErrorMessage {
+						channel_id: *temporary_channel_id,
+						data: "No zero confirmation channels accepted".to_owned(),
+					},
+				},
 			};
 			debug_assert!(peer_state.is_connected);
 			peer_state.pending_msg_events.push(send_msg_err_event);
@@ -9952,13 +9987,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 			let send_msg_err_event = MessageSendEvent::HandleError {
 				node_id: channel.context().get_counterparty_node_id(),
-				action: msgs::ErrorAction::SendErrorMessage{
-					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
-				}
+				action: msgs::ErrorAction::SendErrorMessage {
+					msg: msgs::ErrorMessage {
+						channel_id: *temporary_channel_id,
+						data:
+							"Have too many peers with unfunded channels, not accepting new ones"
+								.to_owned(),
+					},
+				},
 			};
 			debug_assert!(peer_state.is_connected);
 			peer_state.pending_msg_events.push(send_msg_err_event);
-			let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+			let err_str =
+				"Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 			log_error!(logger, "{}", err_str);
 
 			return Err(APIError::APIMisuseError { err: err_str });
@@ -13676,7 +13717,6 @@ where
 		provided_init_features(&self.config.read().unwrap())
 	}
 
-	#[rustfmt::skip]
 	fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
 		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
 			let mut splice_failed_events = Vec::new();
@@ -13784,7 +13824,10 @@ where
 					debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
 					peer_state.is_connected = false;
 					peer_state.ok_to_remove(true)
-				} else { debug_assert!(false, "Unconnected peer disconnected"); true }
+				} else {
+					debug_assert!(false, "Unconnected peer disconnected");
+					true
+				}
 			};
 			if remove_peer {
 				per_peer_state.remove(&counterparty_node_id);
@@ -13809,11 +13852,16 @@ where
 		});
 	}
 
-	#[rustfmt::skip]
-	fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
+	fn peer_connected(
+		&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool,
+	) -> Result<(), ()> {
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
 		if !init_msg.features.supports_static_remote_key() {
-			log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
+			log_debug!(
+				logger,
+				"Peer {} does not support static remote key, disconnecting",
+				log_pubkey!(counterparty_node_id)
+			);
 			return Err(());
 		}
 
@@ -13824,8 +13872,10 @@ where
 		// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
 		// unfunded channels taking up space in memory for disconnected peers, we still let new
 		// peers connect, but we'll reject new channels from them.
-		let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
-		let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+		let connected_peers_without_funded_channels =
+			self.peers_without_funded_channels(|node| node.is_connected);
+		let inbound_peer_limited =
+			inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
 
 		{
 			let mut peer_state_lock = self.per_peer_state.write().unwrap();
@@ -13853,9 +13903,9 @@ where
 					peer_state.latest_features = init_msg.features.clone();
 
 					let best_block_height = self.best_block.read().unwrap().height;
-					if inbound_peer_limited &&
-						Self::unfunded_channel_count(&*peer_state, best_block_height) ==
-							peer_state.channel_by_id.len()
+					if inbound_peer_limited
+						&& Self::unfunded_channel_count(&*peer_state, best_block_height)
+							== peer_state.channel_by_id.len()
 					{
 						res = Err(());
 						return NotifyOption::SkipPersistNoEvents;
@@ -13864,7 +13914,10 @@ where
 					debug_assert!(peer_state.pending_msg_events.is_empty());
 					peer_state.pending_msg_events.clear();
 
-					debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+					debug_assert!(
+						!peer_state.is_connected,
+						"A peer shouldn't be connected twice"
+					);
 					peer_state.is_connected = true;
 				},
 			}
@@ -13881,27 +13934,26 @@ where
 			if !peer_state.peer_storage.is_empty() {
 				pending_msg_events.push(MessageSendEvent::SendPeerStorageRetrieval {
 					node_id: counterparty_node_id.clone(),
-					msg: msgs::PeerStorageRetrieval {
-						data: peer_state.peer_storage.clone()
-					},
+					msg: msgs::PeerStorageRetrieval { data: peer_state.peer_storage.clone() },
 				});
 			}
 
 			for (_, chan) in peer_state.channel_by_id.iter_mut() {
 				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 				match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
-					ReconnectionMsg::Reestablish(msg) =>
+					ReconnectionMsg::Reestablish(msg) => {
 						pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
 							node_id: chan.context().get_counterparty_node_id(),
 							msg,
-						}),
-					ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) =>
-						pending_msg_events.push(MessageSendEvent::SendOpenChannel {
+						})
+					},
+					ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) => pending_msg_events
+						.push(MessageSendEvent::SendOpenChannel {
 							node_id: chan.context().get_counterparty_node_id(),
 							msg,
 						}),
-					ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) =>
-						pending_msg_events .push(MessageSendEvent::SendOpenChannelV2 {
+					ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) => pending_msg_events
+						.push(MessageSendEvent::SendOpenChannelV2 {
 							node_id: chan.context().get_counterparty_node_id(),
 							msg,
 						}),