@@ -8230,74 +8230,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}
 
-	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {
+	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
 		// TODO: Decrypt and check if have any stale or missing ChannelMonitor.
-		let per_peer_state = self.per_peer_state.read().unwrap();
-		let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
-			Some(peer_state_mutex) => peer_state_mutex,
-			None => return,
-		};
-		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-		let peer_state = &mut *peer_state_lock;
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
 
 		log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
-		peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-			node_id: counterparty_node_id.clone(),
-			action: msgs::ErrorAction::SendWarningMessage {
-				msg: msgs::WarningMessage {
-					channel_id: ChannelId([0; 32]),
-					data: "Invalid peer_storage_retrieval message received.".to_owned()
-				},
-				log_level: Level::Trace,
-			}
-		});
+
+		Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+			format!("Invalid peer_storage_retrieval message received.")
+		), ChannelId([0; 32])))
 	}
 
-	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
+	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
-			Some(peer_state_mutex) => peer_state_mutex,
-			None => return,
-		};
+		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), ChannelId([0; 32]))
+			})?;
+
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
 
 		// Check if we have any channels with the peer (Currently we only provide the service to peers we have a channel with).
 		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
 			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
-			peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-				node_id: counterparty_node_id.clone(),
-				action: msgs::ErrorAction::SendWarningMessage {
-					msg: msgs::WarningMessage {
-						channel_id: ChannelId([0; 32]),
-						data: "Ignoring peer_storage message, as peer storage is currently supported only for peers with an active funded channel.".to_owned()
-					},
-					log_level: Level::Trace,
-				}
-			});
-			return;
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				format!("Ignoring peer_storage message, as peer storage is currently supported only for peers with an active funded channel.")
+			), ChannelId([0; 32])));
 		}
 
 		#[cfg(not(test))]
 		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
 			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
-			peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-				node_id: counterparty_node_id.clone(),
-				action: msgs::ErrorAction::SendWarningMessage {
-					msg: msgs::WarningMessage {
-						channel_id: ChannelId([0; 32]),
-						data: format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
-					},
-					log_level: Level::Trace,
-				}
-			});
-			return;
+
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
+			), ChannelId([0; 32])));
 		}
 
 		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
 		peer_state.peer_storage = msg.data;
+
+		Ok(())
 	}
 
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
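Note on the lookup change above: the `match ... None => return` peer lookup only worked while `internal_peer_storage` returned `()`; once the function returns `Result<(), MsgHandleErrInternal>`, the `Option` from `per_peer_state.get(..)` can instead be converted with `ok_or_else` and propagated with `?`. The following standalone sketch illustrates that idiom using hypothetical stand-in types (`PeerState`, `LookupError`), not LDK's actual ones.

use std::collections::HashMap;

#[derive(Debug)]
struct PeerState { storage: Vec<u8> }

#[derive(Debug)]
struct LookupError(String);

fn store_for_peer(
	peers: &mut HashMap<u64, PeerState>, node_id: u64, data: Vec<u8>,
) -> Result<(), LookupError> {
	// `ok_or_else` turns the missing-peer case into an `Err` that `?` propagates,
	// replacing the earlier `match ... None => return` pattern.
	let peer = peers.get_mut(&node_id).ok_or_else(|| {
		LookupError(format!("Can't find a peer matching the passed counterparty node_id {}", node_id))
	})?;
	peer.storage = data;
	Ok(())
}

fn main() {
	let mut peers = HashMap::new();
	peers.insert(7, PeerState { storage: Vec::new() });
	assert!(store_for_peer(&mut peers, 7, vec![1, 2, 3]).is_ok());
	assert!(store_for_peer(&mut peers, 42, vec![4]).is_err());
}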
@@ -11498,12 +11474,12 @@ where
 
 	fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
 		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
-		self.internal_peer_storage(counterparty_node_id, msg);
+		let _ = handle_error!(self, self.internal_peer_storage(counterparty_node_id, msg), counterparty_node_id);
 	}
 
 	fn handle_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorageRetrieval) {
 		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
-		self.internal_peer_storage_retrieval(counterparty_node_id, msg);
+		let _ = handle_error!(self, self.internal_peer_storage_retrieval(counterparty_node_id, msg), counterparty_node_id);
 	}
 
 	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
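Taken together, the two hunks move warning generation out of the internal handlers: `internal_peer_storage` and `internal_peer_storage_retrieval` now return `Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(..), ..))` instead of pushing a `MessageSendEvent::HandleError` themselves, and the `handle_peer_storage*` wrappers feed that `Result` through `handle_error!`, which is responsible for turning it into an outbound warning to the peer. The sketch below mirrors that shape with simplified, hypothetical types (`HandlerError`, `WarningMsg`, `OutboundQueue`, `MAX_STORAGE_SIZE`); it illustrates the pattern, not LDK's implementation.

// Hypothetical stand-ins for the pattern above; these are not LDK types.
#[derive(Debug)]
enum HandlerError {
	// A non-fatal problem that should be reported to the peer as a warning.
	Warn { channel_id: [u8; 32], data: String },
}

#[derive(Debug)]
struct WarningMsg { channel_id: [u8; 32], data: String }

#[derive(Default)]
struct OutboundQueue { warnings: Vec<WarningMsg> }

// Assumed size limit, for illustration only.
const MAX_STORAGE_SIZE: usize = 1024;

// The internal handler validates the message and returns `Err` instead of
// queueing an event itself, mirroring `internal_peer_storage` above.
fn internal_handle(data: &[u8]) -> Result<(), HandlerError> {
	if data.len() > MAX_STORAGE_SIZE {
		return Err(HandlerError::Warn {
			channel_id: [0; 32],
			data: format!("Supports only data up to {} bytes in peer storage.", MAX_STORAGE_SIZE),
		});
	}
	Ok(())
}

// The public wrapper converts the error into an outbound warning, playing the
// role `handle_error!` plays at the `handle_peer_storage` call site.
fn handle(queue: &mut OutboundQueue, data: &[u8]) {
	if let Err(HandlerError::Warn { channel_id, data }) = internal_handle(data) {
		queue.warnings.push(WarningMsg { channel_id, data });
	}
}

fn main() {
	let mut queue = OutboundQueue::default();
	handle(&mut queue, &[0u8; 2048]);
	assert_eq!(queue.warnings.len(), 1);
	println!("queued warning: {}", queue.warnings[0].data);
}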