Skip to content

Commit c484bd7

Browse files
committed
Determine if we have lost data
Deserialise the ChannelMonitors and compare the data to determine if we have lost any state.
1 parent e27f621 commit c484bd7

File tree

1 file changed

+54
-2
lines changed

1 file changed

+54
-2
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 54 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ use crate::events::{
5858
use crate::events::{FundingInfo, PaidBolt12Invoice};
5959
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
6060
// construct one themselves.
61+
use crate::io;
6162
use crate::ln::channel::PendingV2Channel;
6263
use crate::ln::channel::{
6364
self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel,
@@ -78,7 +79,7 @@ use crate::ln::onion_payment::{
7879
};
7980
use crate::ln::onion_utils::{self};
8081
use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason};
81-
use crate::ln::our_peer_storage::EncryptedOurPeerStorage;
82+
use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolderList};
8283
#[cfg(test)]
8384
use crate::ln::outbound_payment;
8485
use crate::ln::outbound_payment::{
@@ -174,7 +175,6 @@ use lightning_invoice::{
174175

175176
use alloc::collections::{btree_map, BTreeMap};
176177

177-
use crate::io;
178178
use crate::io::Read;
179179
use crate::prelude::*;
180180
use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard};
@@ -3013,6 +3013,7 @@ pub(super) const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
30133013
/// This constant defines the upper limit for the size of data
30143014
/// that can be stored for a peer. It is set to 1024 bytes (1 kilobyte)
30153015
/// to prevent excessive resource consumption.
3016+
#[cfg(not(test))]
30163017
const MAX_PEER_STORAGE_SIZE: usize = 1024;
30173018

30183019
/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
@@ -9076,6 +9077,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
90769077
&self, peer_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
90779078
) -> Result<(), MsgHandleErrInternal> {
90789079
// TODO: Check if have any stale or missing ChannelMonitor.
9080+
let per_peer_state = self.per_peer_state.read().unwrap();
90799081
let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None);
90809082
let err = || {
90819083
MsgHandleErrInternal::from_chan_no_close(
@@ -9102,6 +9104,55 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
91029104

91039105
log_trace!(logger, "Got valid {}-byte peer backup from {}", decrypted.len(), peer_node_id);
91049106

9107+
let mut cursor = io::Cursor::new(decrypted);
9108+
match <PeerStorageMonitorHolderList as Readable>::read(&mut cursor) {
9109+
Ok(mon_list) => {
9110+
for mon_holder in mon_list.monitors.iter() {
9111+
let peer_state_mutex =
9112+
match per_peer_state.get(&mon_holder.counterparty_node_id) {
9113+
Some(mutex) => mutex,
9114+
None => {
9115+
log_debug!(
9116+
logger,
9117+
"Not able to find peer_state for the counterparty {}, channelId {}",
9118+
log_pubkey!(mon_holder.counterparty_node_id),
9119+
mon_holder.channel_id
9120+
);
9121+
continue;
9122+
},
9123+
};
9124+
9125+
let peer_state_lock = peer_state_mutex.lock().unwrap();
9126+
let peer_state = &*peer_state_lock;
9127+
9128+
match peer_state.channel_by_id.get(&mon_holder.channel_id) {
9129+
Some(chan) => {
9130+
if let Some(funded_chan) = chan.as_funded() {
9131+
if funded_chan
9132+
.get_revoked_counterparty_commitment_transaction_number()
9133+
> mon_holder.min_seen_secret
9134+
{
9135+
panic!(
9136+
"Lost channel state for channel {}.
9137+
Received peer storage with a more recent state than what our node had.
9138+
Use the FundRecoverer to initiate a force close and sweep the funds.",
9139+
&mon_holder.channel_id
9140+
);
9141+
}
9142+
}
9143+
},
9144+
None => {
9145+
// TODO: Figure out if this channel is so old that we have forgotten about it.
9146+
panic!("Lost a channel {}", &mon_holder.channel_id);
9147+
},
9148+
}
9149+
}
9150+
},
9151+
9152+
Err(e) => {
9153+
panic!("Wrong serialisation of PeerStorageMonitorHolderList: {}", e);
9154+
},
9155+
}
91059156
Ok(())
91069157
}
91079158

@@ -9127,6 +9178,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
91279178
), ChannelId([0; 32])));
91289179
}
91299180

9181+
#[cfg(not(test))]
91309182
if msg.data.len() > MAX_PEER_STORAGE_SIZE {
91319183
log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
91329184

0 commit comments

Comments
 (0)