Commit bce5db7

Merge pull request lightningdevkit#3575 from adi2011/peer-storage/channel-manager
PeerStorage: Add feature and store peer storage in ChannelManager
2 parents ec19ba1 + 6c8e7e4 commit bce5db7

9 files changed: +320 −2 lines changed

lightning-net-tokio/src/lib.rs

+5
@@ -761,6 +761,11 @@ mod tests {
 	fn handle_tx_init_rbf(&self, _their_node_id: PublicKey, _msg: &TxInitRbf) {}
 	fn handle_tx_ack_rbf(&self, _their_node_id: PublicKey, _msg: &TxAckRbf) {}
 	fn handle_tx_abort(&self, _their_node_id: PublicKey, _msg: &TxAbort) {}
+	fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: PeerStorage) {}
+	fn handle_peer_storage_retrieval(
+		&self, _their_node_id: PublicKey, _msg: PeerStorageRetrieval,
+	) {
+	}
 	fn peer_disconnected(&self, their_node_id: PublicKey) {
 		if their_node_id == self.expected_pubkey {
 			self.disconnected_flag.store(true, Ordering::SeqCst);

lightning-types/src/features.rs

+22 −2
@@ -72,6 +72,8 @@
 //! (see the [`Trampoline` feature proposal](https://github.com/lightning/bolts/pull/836) for more information).
 //! - `DnsResolver` - supports resolving DNS names to TXT DNSSEC proofs for BIP 353 payments
 //! (see [bLIP 32](https://github.com/lightning/blips/blob/master/blip-0032.md) for more information).
+//! - `ProvideStorage` - Indicates that we offer the capability to store data of our peers
+//! (see https://github.com/lightning/bolts/pull/1110 for more info).
 //!
 //! LDK knows about the following features, but does not support them:
 //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
@@ -152,7 +154,7 @@ mod sealed {
 	// Byte 4
 	OnionMessages,
 	// Byte 5
-	ChannelType | SCIDPrivacy,
+	ProvideStorage | ChannelType | SCIDPrivacy,
 	// Byte 6
 	ZeroConf,
 	// Byte 7
@@ -173,7 +175,7 @@ mod sealed {
 	// Byte 4
 	OnionMessages,
 	// Byte 5
-	ChannelType | SCIDPrivacy,
+	ProvideStorage | ChannelType | SCIDPrivacy,
 	// Byte 6
 	ZeroConf | Keysend,
 	// Byte 7
@@ -544,6 +546,16 @@ mod sealed {
 		supports_onion_messages,
 		requires_onion_messages
 	);
+	define_feature!(
+		43,
+		ProvideStorage,
+		[InitContext, NodeContext],
+		"Feature flags for `option_provide_storage`.",
+		set_provide_storage_optional,
+		set_provide_storage_required,
+		supports_provide_storage,
+		requires_provide_storage
+	);
 	define_feature!(
 		45,
 		ChannelType,
@@ -1126,6 +1138,14 @@ mod tests {
 		assert!(!features1.requires_unknown_bits_from(&features2));
 		assert!(!features2.requires_unknown_bits_from(&features1));

+		features1.set_provide_storage_required();
+		assert!(features1.requires_unknown_bits_from(&features2));
+		assert!(!features2.requires_unknown_bits_from(&features1));
+
+		features2.set_provide_storage_optional();
+		assert!(!features1.requires_unknown_bits_from(&features2));
+		assert!(!features2.requires_unknown_bits_from(&features1));
+
 		features1.set_data_loss_protect_required();
 		assert!(features1.requires_unknown_bits_from(&features2));
 		assert!(!features2.requires_unknown_bits_from(&features1));
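
Taken together, these hunks register `option_provide_storage` as feature bit 43 in both the `init` and `node_announcement` contexts (bit 42 is the required variant, per the usual even/odd pairing). A minimal sketch of the generated accessors in use; the `peer_offers_storage` helper is illustrative, not part of the crate:

    use lightning_types::features::InitFeatures;

    // Illustrative check: does a peer's `init` feature set advertise
    // `option_provide_storage`? These accessors are generated by the
    // `define_feature!` invocation above.
    fn peer_offers_storage(their_features: &InitFeatures) -> bool {
        their_features.supports_provide_storage()
    }

    fn main() {
        let mut features = InitFeatures::empty();
        assert!(!peer_offers_storage(&features));

        // Sets the odd (optional) bit 43.
        features.set_provide_storage_optional();
        assert!(peer_offers_storage(&features));
        // The required bit (42) remains unset.
        assert!(!features.requires_provide_storage());
    }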

lightning/src/events/mod.rs

+17
@@ -2636,6 +2636,23 @@ pub enum MessageSendEvent {
 		/// The gossip_timestamp_filter which should be sent.
 		msg: msgs::GossipTimestampFilter,
 	},
+	/// Sends a channel partner Peer Storage of our backup which they should store.
+	/// This should be sent on each new connection to the channel partner or whenever we want
+	/// them to update the backup that they store.
+	SendPeerStorage {
+		/// The node_id of this message recipient
+		node_id: PublicKey,
+		/// The peer_storage which should be sent.
+		msg: msgs::PeerStorage,
+	},
+	/// Sends a channel partner their own peer storage which we store and update when they send
+	/// a [`msgs::PeerStorage`].
+	SendPeerStorageRetrieval {
+		/// The node_id of this message recipient
+		node_id: PublicKey,
+		/// The peer_storage_retrieval which should be sent.
+		msg: msgs::PeerStorageRetrieval,
+	}
 }

 /// A trait indicating an object may generate message send events
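
Both new variants carry a recipient `node_id` plus the wire message, matching the surrounding gossip variants. A sketch of how an event-draining loop might dispatch them; the `deliver` hook is hypothetical, only the variant shapes come from this diff:

    use bitcoin::secp256k1::PublicKey;
    use lightning::events::MessageSendEvent;

    // Hypothetical transport hook: a real node would serialize `msg` and
    // hand it to the peer's connection.
    fn deliver(node_id: PublicKey, msg: impl core::fmt::Debug) {
        println!("queueing {:?} for {}", msg, node_id);
    }

    // Dispatch only the variants added in this commit; every other
    // `MessageSendEvent` keeps its existing handling.
    fn dispatch(event: MessageSendEvent) {
        match event {
            MessageSendEvent::SendPeerStorage { node_id, msg } => deliver(node_id, msg),
            MessageSendEvent::SendPeerStorageRetrieval { node_id, msg } => deliver(node_id, msg),
            _ => { /* unchanged */ },
        }
    }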

lightning/src/ln/channelmanager.rs

+160
@@ -1404,6 +1404,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	pub is_connected: bool,
+	/// Holds the peer storage data for the channel partner on a per-peer basis.
+	peer_storage: Vec<u8>,
 }

 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2872,6 +2874,13 @@ const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
 /// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
 const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;

+/// The maximum allowed size for peer storage, in bytes.
+///
+/// This constant defines the upper limit for the size of data
+/// that can be stored for a peer. It is set to 1024 bytes (1 kilobyte)
+/// to prevent excessive resource consumption.
+const MAX_PEER_STORAGE_SIZE: usize = 1024;
+
 /// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
@@ -8269,6 +8278,53 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}

+	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
+		// TODO: Decrypt and check if have any stale or missing ChannelMonitor.
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
+
+		Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+			"Invalid peer_storage_retrieval message received.".into(),
+		), ChannelId([0; 32])))
+	}
+
+	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), ChannelId([0; 32]))
+			})?;
+
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		// Check if we have any channels with the peer (Currently we only provide the service to peers we have a channel with).
+		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
+			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				"Ignoring peer_storage message, as peer storage is currently supported only for \
+				peers with an active funded channel.".into(),
+			), ChannelId([0; 32])));
+		}
+
+		#[cfg(not(test))]
+		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
+			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
+
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
+			), ChannelId([0; 32])));
+		}
+
+		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
+		peer_state.peer_storage = msg.data;
+
+		Ok(())
+	}
+
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
 		let per_peer_state = self.per_peer_state.read().unwrap();
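
The handler's acceptance policy is two checks followed by an unconditional overwrite: the sender must have at least one funded channel with us, and the blob must fit within `MAX_PEER_STORAGE_SIZE` (that size check is compiled out under `cfg(test)` so tests can exercise larger payloads). A self-contained sketch of the same policy, with `PeerEntry` standing in for LDK's `PeerState`:

    const MAX_PEER_STORAGE_SIZE: usize = 1024;

    // Illustrative stand-in for the per-peer state consulted above.
    struct PeerEntry {
        has_funded_channel: bool,
        peer_storage: Vec<u8>,
    }

    // Mirrors internal_peer_storage: warn-and-ignore peers without a funded
    // channel or with oversized blobs; otherwise the newest blob simply
    // replaces the previous one.
    fn accept_peer_storage(peer: &mut PeerEntry, data: Vec<u8>) -> Result<(), String> {
        if !peer.has_funded_channel {
            return Err("peer storage is only offered over funded channels".to_owned());
        }
        if data.len() > MAX_PEER_STORAGE_SIZE {
            return Err(format!("supports only data up to {} bytes", MAX_PEER_STORAGE_SIZE));
        }
        peer.peer_storage = data;
        Ok(())
    }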
@@ -11465,6 +11521,16 @@ where
 		let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
 	}

+	fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
+		let _ = handle_error!(self, self.internal_peer_storage(counterparty_node_id, msg), counterparty_node_id);
+	}
+
+	fn handle_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorageRetrieval) {
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
+		let _ = handle_error!(self, self.internal_peer_storage_retrieval(counterparty_node_id, msg), counterparty_node_id);
+	}
+
 	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
 		// Note that we never need to persist the updated ChannelManager for an inbound
 		// channel_ready message - while the channel's state will change, any channel_ready message
@@ -11706,6 +11772,10 @@ where
 				&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
 				&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
 				&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
+
+				// Peer Storage
+				&events::MessageSendEvent::SendPeerStorage { .. } => false,
+				&events::MessageSendEvent::SendPeerStorageRetrieval { .. } => false,
 			}
 		});
 		debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
@@ -11758,6 +11828,7 @@ where
 					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					closed_channel_monitor_update_ids: BTreeMap::new(),
 					is_connected: true,
+					peer_storage: Vec::new(),
 				}));
 			},
 			hash_map::Entry::Occupied(e) => {
@@ -11787,6 +11858,15 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;

+			if !peer_state.peer_storage.is_empty() {
+				pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval {
+					node_id: counterparty_node_id.clone(),
+					msg: msgs::PeerStorageRetrieval {
+						data: peer_state.peer_storage.clone()
+					},
+				});
+			}
+
 			for (_, chan) in peer_state.channel_by_id.iter_mut() {
 				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 				match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
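
Because this push happens before the loop below queues per-channel handshakes, a reconnecting peer gets its blob back ahead of any `channel_reestablish`. A toy model of that queueing order; the `Outbound` type is illustrative:

    #[derive(Debug, PartialEq)]
    enum Outbound {
        PeerStorageRetrieval(Vec<u8>),
        ChannelReestablish, // handshake details elided
    }

    // Sketch of the order implemented above: the stored blob, if any,
    // goes out before the reestablish messages for each funded channel.
    fn on_peer_connected(stored: &[u8], funded_channels: usize) -> Vec<Outbound> {
        let mut queue = Vec::new();
        if !stored.is_empty() {
            queue.push(Outbound::PeerStorageRetrieval(stored.to_vec()));
        }
        for _ in 0..funded_channels {
            queue.push(Outbound::ChannelReestablish);
        }
        queue
    }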
@@ -12473,6 +12553,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 	features.set_scid_privacy_optional();
 	features.set_zero_conf_optional();
 	features.set_route_blinding_optional();
+	features.set_provide_storage_optional();
 	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
 		features.set_anchors_zero_fee_htlc_tx_optional();
 	}
@@ -12994,6 +13075,8 @@ where
 			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
 		}

+		let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
+
 		(serializable_peer_count).write(writer)?;
 		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
 			// Peers which we have no channels to should be dropped once disconnected. As we
@@ -13003,6 +13086,8 @@ where
 			if !peer_state.ok_to_remove(false) {
 				peer_pubkey.write(writer)?;
 				peer_state.latest_features.write(writer)?;
+				peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
+
 				if !peer_state.monitor_update_blocked_actions.is_empty() {
 					monitor_update_blocked_actions_per_peer
 						.get_or_insert_with(Vec::new)
@@ -13124,6 +13209,7 @@ where
 			(14, decode_update_add_htlcs_opt, option),
 			(15, self.inbound_payment_id_secret, required),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});

 		Ok(())
@@ -13356,6 +13442,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				closed_channel_monitor_update_ids: BTreeMap::new(),
+				peer_storage: Vec::new(),
 				is_connected: false,
 			}
 		};
@@ -13651,6 +13738,7 @@ where
 		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>> = None;
 		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
 		let mut inbound_payment_id_secret = None;
+		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs, option),
@@ -13667,8 +13755,10 @@ where
 			(14, decode_update_add_htlcs, option),
 			(15, inbound_payment_id_secret, option),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
+		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
 		if fake_scid_rand_bytes.is_none() {
 			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
 		}
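
TLV type 19 is odd, so the record is optional on both sides: a `ChannelManager` written by an older LDK simply lacks it and deserializes here as `None`, which the line above folds into an empty `Vec`, while an older reader skips the unknown odd record. A minimal sketch of that defaulting, not the actual macro expansion:

    // (pubkey bytes, blob) pairs; stands in for Vec<(PublicKey, Vec<u8>)>.
    type PeerStorageDir = Vec<([u8; 33], Vec<u8>)>;

    // An absent TLV (older serialization) becomes an empty directory.
    fn fold_peer_storage(read: Option<PeerStorageDir>) -> PeerStorageDir {
        read.unwrap_or_else(Vec::new)
    }

    fn main() {
        assert!(fold_peer_storage(None).is_empty());
        let dir = vec![([2u8; 33], vec![0u8; 100])];
        assert_eq!(fold_peer_storage(Some(dir)).len(), 1);
    }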
@@ -13700,6 +13790,12 @@ where
 		}
 		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());

+		for (peer_pubkey, peer_storage) in peer_storage_dir {
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().peer_storage = peer_storage;
+			}
+		}
+
 		// Handle transitioning from the legacy TLV to the new one on upgrades.
 		if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
 			// We should never serialize an empty map.
@@ -14774,6 +14870,70 @@ mod tests {
 		}
 	}

+	#[test]
+	fn test_peer_storage() {
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
+		// `PeerStorage` from the channel partner.
+		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage { data: vec![0; 100] });
+
+		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+
+		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
+			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
+
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 2);
+
+		for msg in node_0_events {
+			if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
+				nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), msg);
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+			} else if let MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } = msg {
+				nodes[1].node.handle_peer_storage_retrieval(nodes[0].node.get_our_node_id(), msg.clone());
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+			} else {
+				panic!("Unexpected event")
+			}
+		}
+
+		let msg_events_after_peer_storage_retrieval = nodes[1].node.get_and_clear_pending_msg_events();
+
+		// Check if we receive a warning message.
+		let peer_storage_warning: Vec<&MessageSendEvent> = msg_events_after_peer_storage_retrieval
+			.iter()
+			.filter(|event| match event {
+				MessageSendEvent::HandleError { .. } => true,
+				_ => false,
+			})
+			.collect();
+
+		assert_eq!(peer_storage_warning.len(), 1);
+
+		match peer_storage_warning[0] {
+			MessageSendEvent::HandleError { node_id, action } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				match action {
+					ErrorAction::SendWarningMessage { msg, .. } =>
+						assert_eq!(msg.data, "Invalid peer_storage_retrieval message received.".to_owned()),
+					_ => panic!("Unexpected error action"),
+				}
+			}
+			_ => panic!("Unexpected event"),
+		}
+	}
+
 	#[test]
 	fn test_keysend_dup_payment_hash() {
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending

lightning/src/ln/functional_test_utils.rs

+6
@@ -880,6 +880,12 @@ macro_rules! get_htlc_update_msgs {
 /// such messages are intended to all peers.
 pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &mut Vec<MessageSendEvent>) -> MessageSendEvent {
 	let ev_index = msg_events.iter().position(|e| { match e {
+		MessageSendEvent::SendPeerStorage { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendPeerStorageRetrieval { node_id, .. } => {
+			node_id == msg_node_id
+		},
 		MessageSendEvent::SendAcceptChannel { node_id, .. } => {
 			node_id == msg_node_id
 		},
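
With these arms in place, tests that fan events out by recipient can treat peer-storage messages like any other. An illustrative fragment built on the crate's functional test harness; note that nothing in this commit generates `SendPeerStorage` yet, so this shows the dispatch shape rather than a currently passing path:

    // Pop the first event addressed to node 1 and hand it to that node's
    // new ChannelMessageHandler methods (illustrative fragment).
    let mut events = nodes[0].node.get_and_clear_pending_msg_events();
    let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
    match ev {
        MessageSendEvent::SendPeerStorage { msg, .. } =>
            nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg),
        MessageSendEvent::SendPeerStorageRetrieval { msg, .. } =>
            nodes[1].node.handle_peer_storage_retrieval(nodes[0].node.get_our_node_id(), msg),
        other => panic!("Unexpected event {:?}", other),
    }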
