@@ -2073,10 +2073,15 @@ macro_rules! handle_new_monitor_update {
 	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
 		if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
 			handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, {
-				if let ChannelPhase::Funded(chan) = $chan_entry.remove() { chan } else { unreachable!() }
+				unwrap_enum_value!($chan_entry.remove(), ChannelPhase::Funded(chan) => chan)
 			})
 		} else {
-			unreachable!();
+			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+			// update).
+			debug_assert!(false);
+			Err(MsgHandleErrInternal::send_err_msg_no_close(
+				"Cannot update monitor for unfunded channels as they don't have monitors yet".into(),
+				*$chan_entry.key()))
 		}
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
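The `INITIAL_MONITOR` arm above relies on `unwrap_enum_value!`, which is defined elsewhere in this PR and not shown in any hunk here. As a hypothetical minimal sketch, assuming it simply generalizes the old inline `if let ... else { unreachable!() }` it replaces:

```rust
// Hypothetical sketch of unwrap_enum_value!; the real macro lives
// elsewhere in this PR and may differ. It extracts the value bound by
// the given pattern and panics on any other variant, exactly like the
// inline `if let ... { chan } else { unreachable!() }` it replaces.
macro_rules! unwrap_enum_value {
	($value: expr, $pattern: pat => $binding: expr) => {
		match $value {
			$pattern => $binding,
			_ => unreachable!(),
		}
	};
}
```

Note also the `debug_assert!(false)` plus error-return shape in the `else` arm: it panics in debug builds, where reaching the branch indicates a local bug, but degrades to an ordinary error message for the peer in release builds.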
@@ -2529,47 +2534,38 @@ where
 
 			match peer_state.channel_by_id.entry(channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
-					match chan_phase_entry.get_mut() {
-						ChannelPhase::Funded(chan) => {
-							let funding_txo_opt = chan.context.get_funding_txo();
-							let their_features = &peer_state.latest_features;
-							let (shutdown_msg, mut monitor_update_opt, htlcs) =
-								chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-							failed_htlcs = htlcs;
-
-							// We can send the `shutdown` message before updating the `ChannelMonitor`
-							// here as we don't need the monitor update to complete until we send a
-							// `shutdown_signed`, which we'll delay if we're pending a monitor update.
-							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-								node_id: *counterparty_node_id,
-								msg: shutdown_msg,
-							});
+					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+						let funding_txo_opt = chan.context.get_funding_txo();
+						let their_features = &peer_state.latest_features;
+						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+						failed_htlcs = htlcs;
+
+						// We can send the `shutdown` message before updating the `ChannelMonitor`
+						// here as we don't need the monitor update to complete until we send a
+						// `shutdown_signed`, which we'll delay if we're pending a monitor update.
+						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+							node_id: *counterparty_node_id,
+							msg: shutdown_msg,
+						});
 
-							// Update the monitor with the shutdown script if necessary.
-							if let Some(monitor_update) = monitor_update_opt.take() {
-								break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-									peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
-							}
+						// Update the monitor with the shutdown script if necessary.
+						if let Some(monitor_update) = monitor_update_opt.take() {
+							break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+								peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+						}
 
-							if chan.is_shutdown() {
-								if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
-									if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
-										peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-											msg: channel_update
-										});
-									}
-									self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+						if chan.is_shutdown() {
+							if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
+								if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
+									peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+										msg: channel_update
+									});
 								}
+								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 							}
-							break Ok(());
-						},
-						_ => {
-							// If we reach this point, it means that the channel_id either refers to an unfunded channel or
-							// it does not exist for this peer. Either way, we can attempt to force-close it.
-							//
-							// An appropriate error will be returned for non-existence of the channel if that's the case.
-							return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
-						},
+						}
+						break Ok(());
 					}
 				},
 				hash_map::Entry::Vacant(_) => (),
@@ -2580,8 +2576,6 @@ where
			//
			// An appropriate error will be returned for non-existence of the channel if that's the case.
			return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
-			// TODO(dunxen): This is still not ideal as we're doing some extra lookups.
-			// Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
 		};
 
 		for htlc_source in failed_htlcs.drain(..) {
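The `break Ok(())` and `break handle_new_monitor_update!(..)` lines above work because the enclosing function (outside these hunks) assigns the value of a `loop` to a `Result`, so every `break` carries the operation's outcome out of the loop. A minimal standalone illustration of that pattern, with hypothetical names:

```rust
// Loop-with-break-value: `break <expr>` makes the whole `loop` evaluate
// to `<expr>`, so early exits can carry a `Result` out of the loop.
fn close_once() -> Result<(), String> {
	let mut attempts = 0;
	let result: Result<(), String> = loop {
		attempts += 1;
		if attempts == 2 {
			break Ok(());
		}
		if attempts > 3 {
			break Err("gave up".to_owned());
		}
	};
	result
}
```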
@@ -3043,10 +3037,9 @@ where
 					}
 					let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 					let peer_state = &mut *peer_state_lock;
-					let opt_chan_phase = peer_state.channel_by_id.get_mut(&forwarding_id);
-					let chan = match opt_chan_phase.iter().find_map(
+					let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
 						|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
-					) {
+					).flatten() {
 						None => {
 							// Channel was removed. The short_to_chan_info and channel_by_id maps
 							// have no consistency guarantees.
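In the rewritten lookup above, the closure passed to `Option::map` itself returns an `Option`, so the trailing `.flatten()` makes the chain equivalent to `Option::and_then` (which clippy's `map_flatten` lint would suggest). A self-contained sketch of the same shape, with hypothetical stand-in types:

```rust
use std::collections::HashMap;

// Stand-ins for ChannelPhase and channel_by_id, for illustration only.
enum Phase { Funded(u32), Unfunded }

fn funded_value(map: &HashMap<u64, Phase>, id: u64) -> Option<u32> {
	// Look up the entry, then project out the Funded variant.
	// `.map(..).flatten()` behaves exactly like `.and_then(..)` here.
	map.get(&id).map(
		|phase| if let Phase::Funded(v) = phase { Some(*v) } else { None }
	).flatten()
}
```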
@@ -3857,7 +3854,7 @@ where
 						next_hop_channel_id, next_node_id)
 				}),
 			None => return Err(APIError::ChannelUnavailable {
-				err: format!("Funded channel with id {} not found for the passed counterparty node_id {}.",
+				err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
 					next_hop_channel_id, next_node_id)
 			})
 		}
@@ -4417,10 +4414,10 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
-				hash_map::Entry::Occupied(mut chan_phase_phase) => {
+				hash_map::Entry::Occupied(mut chan_phase) => {
 					updated_chan = true;
 					handle_new_monitor_update!(self, funding_txo, update.clone(),
-						peer_state_lock, peer_state, per_peer_state, chan_phase_phase).map(|_| ())
+						peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
 				},
 				hash_map::Entry::Vacant(_) => Ok(()),
 			}
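This call site uses the base form of `handle_new_monitor_update!`, while the first hunk shows the `INITIAL_MONITOR` and `MANUALLY_REMOVING` forms: `macro_rules!` arms can dispatch on literal marker tokens like these. A toy sketch of that dispatch technique (names hypothetical, unrelated to the real macro's behavior):

```rust
// Marker-token dispatch: each arm matches a literal ident after the
// ordinary fragments, selecting a different expansion.
macro_rules! dispatch {
	($x: expr, INITIAL_MONITOR) => { $x + 1 };
	($x: expr, MANUALLY_REMOVING, $extra: expr) => { $x + $extra };
	($x: expr) => { $x };
}

fn main() {
	assert_eq!(dispatch!(1, INITIAL_MONITOR), 2);
	assert_eq!(dispatch!(1, MANUALLY_REMOVING, 5), 6);
	assert_eq!(dispatch!(1), 1);
}
```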