diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 2ff88bc066a..6c6d8a716d9 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -761,6 +761,11 @@ mod tests {
 		fn handle_tx_init_rbf(&self, _their_node_id: PublicKey, _msg: &TxInitRbf) {}
 		fn handle_tx_ack_rbf(&self, _their_node_id: PublicKey, _msg: &TxAckRbf) {}
 		fn handle_tx_abort(&self, _their_node_id: PublicKey, _msg: &TxAbort) {}
+		fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: PeerStorage) {}
+		fn handle_peer_storage_retrieval(
+			&self, _their_node_id: PublicKey, _msg: PeerStorageRetrieval,
+		) {
+		}
 		fn peer_disconnected(&self, their_node_id: PublicKey) {
 			if their_node_id == self.expected_pubkey {
 				self.disconnected_flag.store(true, Ordering::SeqCst);
diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs
index 2dd9b14bd35..12b3a1f35e5 100644
--- a/lightning-types/src/features.rs
+++ b/lightning-types/src/features.rs
@@ -72,6 +72,8 @@
 //! (see the [`Trampoline` feature proposal](https://github.com/lightning/bolts/pull/836) for more information).
 //! - `DnsResolver` - supports resolving DNS names to TXT DNSSEC proofs for BIP 353 payments
 //! (see [bLIP 32](https://github.com/lightning/blips/blob/master/blip-0032.md) for more information).
+//! - `ProvideStorage` - indicates that we offer the capability to store data of our peers
+//! (see https://github.com/lightning/bolts/pull/1110 for more information).
 //!
 //! LDK knows about the following features, but does not support them:
 //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
@@ -152,7 +154,7 @@ mod sealed {
 		// Byte 4
 		OnionMessages,
 		// Byte 5
-		ChannelType | SCIDPrivacy,
+		ProvideStorage | ChannelType | SCIDPrivacy,
 		// Byte 6
 		ZeroConf,
 		// Byte 7
@@ -173,7 +175,7 @@ mod sealed {
 		// Byte 4
 		OnionMessages,
 		// Byte 5
-		ChannelType | SCIDPrivacy,
+		ProvideStorage | ChannelType | SCIDPrivacy,
 		// Byte 6
 		ZeroConf | Keysend,
 		// Byte 7
@@ -544,6 +546,16 @@ mod sealed {
 		supports_onion_messages,
 		requires_onion_messages
 	);
+	define_feature!(
+		43,
+		ProvideStorage,
+		[InitContext, NodeContext],
+		"Feature flags for `option_provide_storage`.",
+		set_provide_storage_optional,
+		set_provide_storage_required,
+		supports_provide_storage,
+		requires_provide_storage
+	);
 	define_feature!(
 		45,
 		ChannelType,
@@ -1126,6 +1138,14 @@ mod tests {
 		assert!(!features1.requires_unknown_bits_from(&features2));
 		assert!(!features2.requires_unknown_bits_from(&features1));
 
+		features1.set_provide_storage_required();
+		assert!(features1.requires_unknown_bits_from(&features2));
+		assert!(!features2.requires_unknown_bits_from(&features1));
+
+		features2.set_provide_storage_optional();
+		assert!(!features1.requires_unknown_bits_from(&features2));
+		assert!(!features2.requires_unknown_bits_from(&features1));
+
 		features1.set_data_loss_protect_required();
 		assert!(features1.requires_unknown_bits_from(&features2));
 		assert!(!features2.requires_unknown_bits_from(&features1));
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index 6cf966c7214..b5dad96a979 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -2636,6 +2636,23 @@ pub enum MessageSendEvent {
 		/// The gossip_timestamp_filter which should be sent.
 		msg: msgs::GossipTimestampFilter,
 	},
+	/// Sends a channel partner a `peer_storage` message containing our backup data, which they
+	/// should store. This should be sent on each new connection to the channel partner or
+	/// whenever we want them to update the backup that they store.
+	SendPeerStorage {
+		/// The node_id of this message recipient.
+		node_id: PublicKey,
+		/// The peer_storage which should be sent.
+		msg: msgs::PeerStorage,
+	},
+	/// Sends a channel partner their own peer storage, which we store and update whenever they
+	/// send us a [`msgs::PeerStorage`].
+	SendPeerStorageRetrieval {
+		/// The node_id of this message recipient.
+		node_id: PublicKey,
+		/// The peer_storage_retrieval which should be sent.
+		msg: msgs::PeerStorageRetrieval,
+	},
 }
 
 /// A trait indicating an object may generate message send events
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 57ed9898e5d..91df6a7099c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1404,6 +1404,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	pub is_connected: bool,
+	/// Holds the peer storage data for the channel partner on a per-peer basis.
+	peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2872,6 +2874,13 @@ const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
 /// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
 const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
 
+/// The maximum allowed size for peer storage, in bytes.
+///
+/// This constant defines the upper limit for the size of data
+/// that can be stored for a peer. It is set to 1024 bytes (1 KiB)
+/// to prevent excessive resource consumption.
+const MAX_PEER_STORAGE_SIZE: usize = 1024;
+
 /// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
@@ -8245,6 +8254,53 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}
 
+	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
+		// TODO: Decrypt and check if we have any stale or missing ChannelMonitors.
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
+
+		Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+			"Invalid peer_storage_retrieval message received.".into(),
+		), ChannelId([0; 32])))
+	}
+
+	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), ChannelId([0; 32]))
+			})?;
+
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		// Check if we have any channels with the peer (currently we only provide peer storage to peers we have a funded channel with).
+		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
+			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				"Ignoring peer_storage message, as peer storage is currently supported only for \
+				peers with an active funded channel.".into(),
+			), ChannelId([0; 32])));
+		}
+
+		#[cfg(not(test))]
+		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
+			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as it is over 1 KiB", log_pubkey!(counterparty_node_id));
+
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
+			), ChannelId([0; 32])));
+		}
+
+		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
+		peer_state.peer_storage = msg.data;
+
+		Ok(())
+	}
+
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -11443,6 +11499,16 @@ where
 		let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
 	}
 
+	fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
+		let _ = handle_error!(self, self.internal_peer_storage(counterparty_node_id, msg), counterparty_node_id);
+	}
+
+	fn handle_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorageRetrieval) {
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
+		let _ = handle_error!(self, self.internal_peer_storage_retrieval(counterparty_node_id, msg), counterparty_node_id);
+	}
+
 	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
 		// Note that we never need to persist the updated ChannelManager for an inbound
 		// channel_ready message - while the channel's state will change, any channel_ready message
@@ -11684,6 +11750,10 @@ where
 					&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
 					&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
 					&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
+
+					// Peer Storage
+					&events::MessageSendEvent::SendPeerStorage { .. } => false,
+					&events::MessageSendEvent::SendPeerStorageRetrieval { .. } => false,
 				}
 			});
 		debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
@@ -11736,6 +11806,7 @@ where
 					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					closed_channel_monitor_update_ids: BTreeMap::new(),
 					is_connected: true,
+					peer_storage: Vec::new(),
 				}));
 			},
 			hash_map::Entry::Occupied(e) => {
@@ -11765,6 +11836,15 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 
+			if !peer_state.peer_storage.is_empty() {
+				pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval {
+					node_id: counterparty_node_id.clone(),
+					msg: msgs::PeerStorageRetrieval {
+						data: peer_state.peer_storage.clone()
+					},
+				});
+			}
+
 			for (_, chan) in peer_state.channel_by_id.iter_mut() {
 				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 				match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
@@ -12451,6 +12531,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 	features.set_scid_privacy_optional();
 	features.set_zero_conf_optional();
 	features.set_route_blinding_optional();
+	features.set_provide_storage_optional();
 	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
 		features.set_anchors_zero_fee_htlc_tx_optional();
 	}
@@ -12972,6 +13053,8 @@ where
 			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
 		}
 
+		let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
+
 		(serializable_peer_count).write(writer)?;
 		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
 			// Peers which we have no channels to should be dropped once disconnected. As we
@@ -12981,6 +13064,8 @@ where
 			if !peer_state.ok_to_remove(false) {
 				peer_pubkey.write(writer)?;
 				peer_state.latest_features.write(writer)?;
+				peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
+
 				if !peer_state.monitor_update_blocked_actions.is_empty() {
 					monitor_update_blocked_actions_per_peer
 						.get_or_insert_with(Vec::new)
@@ -13102,6 +13187,7 @@ where
 			(14, decode_update_add_htlcs_opt, option),
 			(15, self.inbound_payment_id_secret, required),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 
 		Ok(())
@@ -13334,6 +13420,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				closed_channel_monitor_update_ids: BTreeMap::new(),
+				peer_storage: Vec::new(),
 				is_connected: false,
 			}
 		};
@@ -13629,6 +13716,7 @@ where
 		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
 		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
 		let mut inbound_payment_id_secret = None;
+		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs, option),
@@ -13645,8 +13733,10 @@
 			(14, decode_update_add_htlcs, option),
 			(15, inbound_payment_id_secret, option),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
+		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
 		if fake_scid_rand_bytes.is_none() {
 			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
 		}
@@ -13678,6 +13768,12 @@ where
 		}
 		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
 
+		for (peer_pubkey, peer_storage) in peer_storage_dir {
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().peer_storage = peer_storage;
+			}
+		}
+
 		// Handle transitioning from the legacy TLV to the new one on upgrades.
 		if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
 			// We should never serialize an empty map.
@@ -14752,6 +14848,70 @@ mod tests {
 		}
 	}
 
+	#[test]
+	fn test_peer_storage() {
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		create_announced_chan_between_nodes(&nodes, 0, 1);
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
+		// `PeerStorage` from the channel partner.
+		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage { data: vec![0; 100] });
+
+		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+
+		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
+			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+		}, true).unwrap();
+		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
+			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+		}, false).unwrap();
+
+		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(node_0_events.len(), 2);
+
+		for msg in node_0_events {
+			if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+				nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), msg);
+			} else if let MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } = msg {
+				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+				nodes[1].node.handle_peer_storage_retrieval(nodes[0].node.get_our_node_id(), msg.clone());
+			} else {
+				panic!("Unexpected event")
+			}
+		}
+
+		let msg_events_after_peer_storage_retrieval = nodes[1].node.get_and_clear_pending_msg_events();
+
+		// Check that we receive a warning message.
+		let peer_storage_warning: Vec<&MessageSendEvent> = msg_events_after_peer_storage_retrieval
+			.iter()
+			.filter(|event| match event {
+				MessageSendEvent::HandleError { .. } => true,
+				_ => false,
+			})
+			.collect();
+
+		assert_eq!(peer_storage_warning.len(), 1);
+
+		match peer_storage_warning[0] {
+			MessageSendEvent::HandleError { node_id, action } => {
+				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+				match action {
+					ErrorAction::SendWarningMessage { msg, .. } =>
+						assert_eq!(msg.data, "Invalid peer_storage_retrieval message received.".to_owned()),
+					_ => panic!("Unexpected error action"),
+				}
+			}
+			_ => panic!("Unexpected event"),
+		}
+	}
+
 	#[test]
 	fn test_keysend_dup_payment_hash() {
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 7822bdac9f1..fa21967fdde 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -880,6 +880,12 @@ macro_rules! get_htlc_update_msgs {
 /// such messages are intended for all peers.
 pub fn remove_first_msg_event_to_node(msg_node_id: &PublicKey, msg_events: &mut Vec<MessageSendEvent>) -> MessageSendEvent {
 	let ev_index = msg_events.iter().position(|e| { match e {
+		MessageSendEvent::SendPeerStorage { node_id, .. } => {
+			node_id == msg_node_id
+		},
+		MessageSendEvent::SendPeerStorageRetrieval { node_id, .. } => {
+			node_id == msg_node_id
+		},
 		MessageSendEvent::SendAcceptChannel { node_id, .. } => {
 			node_id == msg_node_id
 		},
diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs
index 8204d90076a..08d6418cfe5 100644
--- a/lightning/src/ln/msgs.rs
+++ b/lightning/src/ln/msgs.rs
@@ -726,6 +726,36 @@ pub struct UpdateFulfillHTLC {
 	pub payment_preimage: PaymentPreimage,
 }
 
+/// A [`peer_storage`] message that can be sent to or received from a peer.
+///
+/// This message is used to distribute backup data to peers.
+/// If data is lost or corrupted, users can retrieve it through [`PeerStorageRetrieval`]
+/// to recover critical information, such as channel states, for fund recovery.
+///
+/// [`peer_storage`] is used to send our own encrypted backup data to a peer.
+///
+/// [`peer_storage`]: https://github.com/lightning/bolts/pull/1110
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct PeerStorage {
+	/// Our encrypted backup data included in the message.
+	pub data: Vec<u8>,
+}
+
+/// A [`peer_storage_retrieval`] message that can be sent to or received from a peer.
+///
+/// This message is sent to peers for whom we store backup data.
+/// If we receive this message, it indicates that the peer has stored our backup data.
+/// This data can be used for fund recovery in case of data loss.
+///
+/// [`peer_storage_retrieval`] is used to return the most recent backup we have stored for the peer.
+///
+/// [`peer_storage_retrieval`]: https://github.com/lightning/bolts/pull/1110
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct PeerStorageRetrieval {
+	/// The peer's most recent backup data included in the message.
+	pub data: Vec<u8>,
+}
+
 /// An [`update_fail_htlc`] message to be sent to or received from a peer.
 ///
 /// [`update_fail_htlc`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#removing-an-htlc-update_fulfill_htlc-update_fail_htlc-and-update_fail_malformed_htlc
@@ -1508,6 +1538,12 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider {
 	/// Handle an incoming `channel_ready` message from the given peer.
 	fn handle_channel_ready(&self, their_node_id: PublicKey, msg: &ChannelReady);
 
+	// Peer Storage
+	/// Handle an incoming `peer_storage` message from the given peer.
+	fn handle_peer_storage(&self, their_node_id: PublicKey, msg: PeerStorage);
+	/// Handle an incoming `peer_storage_retrieval` message from the given peer.
+	fn handle_peer_storage_retrieval(&self, their_node_id: PublicKey, msg: PeerStorageRetrieval);
+
 	// Channel close:
 	/// Handle an incoming `shutdown` message from the given peer.
 	fn handle_shutdown(&self, their_node_id: PublicKey, msg: &Shutdown);
@@ -2634,6 +2670,14 @@ impl_writeable_msg!(UpdateFulfillHTLC, {
 	payment_preimage
 }, {});
 
+impl_writeable_msg!(PeerStorage, {
+	data
+}, {});
+
+impl_writeable_msg!(PeerStorageRetrieval, {
+	data
+}, {});
+
 // Note that this is written as a part of ChannelManager objects, and thus cannot change its
 // serialization format in a way which assumes we know the total serialized length/message end
 // position.
@@ -4536,6 +4580,26 @@ mod tests {
 		assert_eq!(encoded_value, target_value);
 	}
 
+	#[test]
+	fn encoding_peer_storage() {
+		let peer_storage = msgs::PeerStorage {
+			data: <Vec<u8>>::from_hex("01020304050607080910").unwrap()
+		};
+		let encoded_value = peer_storage.encode();
+		let target_value = <Vec<u8>>::from_hex("000a01020304050607080910").unwrap();
+		assert_eq!(encoded_value, target_value);
+	}
+
+	#[test]
+	fn encoding_peer_storage_retrieval() {
+		let peer_storage_retrieval = msgs::PeerStorageRetrieval {
+			data: <Vec<u8>>::from_hex("01020304050607080910").unwrap()
+		};
+		let encoded_value = peer_storage_retrieval.encode();
+		let target_value = <Vec<u8>>::from_hex("000a01020304050607080910").unwrap();
+		assert_eq!(encoded_value, target_value);
+	}
+
 	#[test]
 	fn encoding_pong() {
 		let pong = msgs::Pong {
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 8df168fee12..aca1afbff39 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -327,6 +327,8 @@ impl ChannelMessageHandler for ErroringMessageHandler {
 	}
 	// msgs::ChannelUpdate does not contain the channel_id field, so we just drop them.
 	fn handle_channel_update(&self, _their_node_id: PublicKey, _msg: &msgs::ChannelUpdate) {}
+	fn handle_peer_storage(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorage) {}
+	fn handle_peer_storage_retrieval(&self, _their_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {}
 	fn peer_disconnected(&self, _their_node_id: PublicKey) {}
 	fn peer_connected(&self, _their_node_id: PublicKey, _init: &msgs::Init, _inbound: bool) -> Result<(), ()> { Ok(()) }
 	fn handle_error(&self, _their_node_id: PublicKey, _msg: &msgs::ErrorMessage) {}
@@ -1850,6 +1852,12 @@ impl
 			wire::Message::ChannelReady(msg) => {
 				self.message_handler.chan_handler.handle_channel_ready(their_node_id, &msg);
 			},
+			wire::Message::PeerStorage(msg) => {
+				self.message_handler.chan_handler.handle_peer_storage(their_node_id, msg);
+			},
+			wire::Message::PeerStorageRetrieval(msg) => {
+				self.message_handler.chan_handler.handle_peer_storage_retrieval(their_node_id, msg);
+			},
 
 			// Quiescence messages:
 			wire::Message::Stfu(msg) => {
@@ -2163,6 +2171,14 @@ impl
 				match event {
+					MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
+						log_debug!(self.logger, "Handling SendPeerStorage event in peer_handler for {}", log_pubkey!(node_id));
+						self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
+					},
+					MessageSendEvent::SendPeerStorageRetrieval { ref node_id, ref msg } => {
+						log_debug!(self.logger, "Handling SendPeerStorageRetrieval event in peer_handler for {}", log_pubkey!(node_id));
+						self.enqueue_message(&mut *get_peer_for_forwarding!(node_id)?, msg);
+					},
 					MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
 						log_debug!(WithContext::from(&self.logger, Some(*node_id), Some(msg.common_fields.temporary_channel_id), None),
 							"Handling SendAcceptChannel event in peer_handler for node {} for channel {}", log_pubkey!(node_id),
diff --git a/lightning/src/ln/wire.rs b/lightning/src/ln/wire.rs
index 4cf5e21c173..c42cc4e4c92 100644
--- a/lightning/src/ln/wire.rs
+++ b/lightning/src/ln/wire.rs
@@ -53,6 +53,8 @@ pub(crate) enum Message<T> where T: core::fmt::Debug + Type + TestEq {
 	Warning(msgs::WarningMessage),
 	Ping(msgs::Ping),
 	Pong(msgs::Pong),
+	PeerStorage(msgs::PeerStorage),
+	PeerStorageRetrieval(msgs::PeerStorageRetrieval),
 	OpenChannel(msgs::OpenChannel),
 	OpenChannelV2(msgs::OpenChannelV2),
 	AcceptChannel(msgs::AcceptChannel),
@@ -111,6 +113,8 @@ impl<T> Writeable for Message<T> where T: core::fmt::Debug + Type + TestEq {
 			&Message::Warning(ref msg) => msg.write(writer),
 			&Message::Ping(ref msg) => msg.write(writer),
 			&Message::Pong(ref msg) => msg.write(writer),
+			&Message::PeerStorage(ref msg) => msg.write(writer),
+			&Message::PeerStorageRetrieval(ref msg) => msg.write(writer),
 			&Message::OpenChannel(ref msg) => msg.write(writer),
 			&Message::OpenChannelV2(ref msg) => msg.write(writer),
 			&Message::AcceptChannel(ref msg) => msg.write(writer),
@@ -169,6 +173,8 @@ impl<T> Type for Message<T> where T: core::fmt::Debug + Type + TestEq {
 			&Message::Warning(ref msg) => msg.type_id(),
 			&Message::Ping(ref msg) => msg.type_id(),
 			&Message::Pong(ref msg) => msg.type_id(),
+			&Message::PeerStorage(ref msg) => msg.type_id(),
+			&Message::PeerStorageRetrieval(ref msg) => msg.type_id(),
 			&Message::OpenChannel(ref msg) => msg.type_id(),
 			&Message::OpenChannelV2(ref msg) => msg.type_id(),
 			&Message::AcceptChannel(ref msg) => msg.type_id(),
@@ -261,6 +267,12 @@ fn do_read<R: io::Read, T, H: core::ops::Deref>(buffer: &mut R, message_type: u1
 		msgs::Pong::TYPE => {
 			Ok(Message::Pong(Readable::read(buffer)?))
 		},
+		msgs::PeerStorage::TYPE => {
+			Ok(Message::PeerStorage(Readable::read(buffer)?))
+		},
+		msgs::PeerStorageRetrieval::TYPE => {
+			Ok(Message::PeerStorageRetrieval(Readable::read(buffer)?))
+		},
 		msgs::OpenChannel::TYPE => {
 			Ok(Message::OpenChannel(Readable::read(buffer)?))
 		},
@@ -448,6 +460,14 @@ impl Encode for msgs::Stfu {
 	const TYPE: u16 = 2;
}
 
+impl Encode for msgs::PeerStorage {
+	const TYPE: u16 = 7;
+}
+
+impl Encode for msgs::PeerStorageRetrieval {
+	const TYPE: u16 = 9;
+}
+
 impl Encode for msgs::Init {
 	const TYPE: u16 = 16;
 }
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 22853787974..9d02452010d 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -1118,6 +1118,16 @@ impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
 		self.received_msg(wire::Message::TxAbort(msg.clone()));
 	}
 
+	fn handle_peer_storage(&self, _their_node_id: PublicKey, msg: msgs::PeerStorage) {
+		self.received_msg(wire::Message::PeerStorage(msg));
+	}
+
+	fn handle_peer_storage_retrieval(
+		&self, _their_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
+	) {
+		self.received_msg(wire::Message::PeerStorageRetrieval(msg));
+	}
+
 	fn message_received(&self) {}
 }
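
A note on the byte layout exercised by the `encoding_peer_storage` and `encoding_peer_storage_retrieval` tests above: `impl_writeable_msg!` serializes the `data` field as a big-endian `u16` length prefix followed by the raw bytes, and on the wire that payload is preceded by the 2-byte message type assigned in `wire.rs` (7 for `peer_storage`, 9 for `peer_storage_retrieval`). The following standalone sketch reproduces that framing; `frame_peer_storage` is a hypothetical helper for illustration only, not part of this PR or of LDK's API:

```rust
// Hypothetical sketch, not LDK code: rebuilds the framing implied by the
// encoding_peer_storage test vector (000a01020304050607080910) plus the
// wire type prefix from the Encode impls above.
fn frame_peer_storage(msg_type: u16, data: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(4 + data.len());
    // 2-byte big-endian message type: 7 = peer_storage, 9 = peer_storage_retrieval.
    out.extend_from_slice(&msg_type.to_be_bytes());
    // impl_writeable_msg! writes the Vec<u8> as a u16 big-endian length prefix
    // followed by the raw bytes.
    out.extend_from_slice(&(data.len() as u16).to_be_bytes());
    out.extend_from_slice(data);
    out
}

fn main() {
    let data = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10];
    let framed = frame_peer_storage(7, &data);
    assert_eq!(framed[..2], [0x00, 0x07]); // wire type for peer_storage
    assert_eq!(framed[2..4], [0x00, 0x0a]); // length prefix: 10 bytes
    assert_eq!(framed[4..], data); // body matches the test vector
}
```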