
Commit f68c6c5

LSPS2: Limit the total number of peers
While LDK/`ChannelManager` should already introduce an upper bound on the number of peers, here we ensure that our `PeerState` map can't grow unboundedly. To this end, we simply return an `Internal error` response and abort when we would hit the limit of 100000 peers.
1 parent 7a89521 commit f68c6c5
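The guard works by snapshotting the map's size before taking a `HashMap` entry and refusing to fill a vacant entry once the cap is reached, while already-known peers keep being served. The following standalone sketch (hypothetical names and key type; only the `MAX_TOTAL_PEERS` constant and the `Entry`-based check are taken from the diff below) illustrates the pattern outside of LDK:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

const MAX_TOTAL_PEERS: usize = 100_000;

/// Simplified stand-in for the per-peer state the service handler keeps.
#[derive(Default)]
struct PeerState;

/// Returns the peer's state, creating it only while the map is below the cap.
fn get_or_insert_peer_state(
	peers: &mut HashMap<[u8; 33], PeerState>, peer_id: [u8; 33],
) -> Result<&mut PeerState, String> {
	// Snapshot the size first: `entry()` takes a mutable borrow of the map.
	let at_capacity = peers.len() >= MAX_TOTAL_PEERS;
	match peers.entry(peer_id) {
		Entry::Vacant(e) => {
			if at_capacity {
				// Mirror the commit: reject the request rather than grow the map.
				Err(format!("peer limit of {} reached, dropping request", MAX_TOTAL_PEERS))
			} else {
				Ok(e.insert(PeerState::default()))
			}
		},
		// Peers we already track are always served, even at capacity.
		Entry::Occupied(e) => Ok(e.into_mut()),
	}
}
```

Only the creation of new entries is refused, so the map stays bounded without disrupting peers that already have state.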

File tree

2 files changed: +45 −7 lines changed

lightning-liquidity/src/lsps0/ser.rs

+2
@@ -40,6 +40,8 @@ pub(crate) const JSONRPC_RESULT_FIELD_KEY: &str = "result";
 pub(crate) const JSONRPC_ERROR_FIELD_KEY: &str = "error";
 pub(crate) const JSONRPC_INVALID_MESSAGE_ERROR_CODE: i32 = -32700;
 pub(crate) const JSONRPC_INVALID_MESSAGE_ERROR_MESSAGE: &str = "parse error";
+pub(crate) const JSONRPC_INTERNAL_ERROR_ERROR_CODE: i32 = -32603;
+pub(crate) const JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE: &str = "Internal error";
 
 pub(crate) const LSPS0_CLIENT_REJECTED_ERROR_CODE: i32 = 1;
 
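The new constants mirror the JSON-RPC 2.0 `Internal error` code (`-32603`), the standard code for a server-side failure in the JSON-RPC protocol the LSPS0 transport builds on. As a rough illustration (a hypothetical helper built on `serde_json` rather than the crate's own serializer, with an illustrative field layout), an error response carrying these values would look roughly like this:

```rust
// Illustration only: the real responses are produced by the crate's own
// LSPS0 serializer, not by serde_json as done here.
fn internal_error_response(request_id: &str) -> serde_json::Value {
	serde_json::json!({
		"jsonrpc": "2.0",
		"id": request_id,
		"error": {
			"code": -32603,             // JSONRPC_INTERNAL_ERROR_ERROR_CODE
			"message": "Internal error" // JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE
		}
	})
}
```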
lightning-liquidity/src/lsps2/service.rs

+43 −7
@@ -11,14 +11,17 @@
 
 use crate::events::{Event, EventQueue};
 use crate::lsps0::ser::{
-	LSPSMessage, ProtocolMessageHandler, RequestId, ResponseError, LSPS0_CLIENT_REJECTED_ERROR_CODE,
+	LSPSMessage, ProtocolMessageHandler, RequestId, ResponseError,
+	JSONRPC_INTERNAL_ERROR_ERROR_CODE, JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE,
+	LSPS0_CLIENT_REJECTED_ERROR_CODE,
 };
 use crate::lsps2::event::LSPS2ServiceEvent;
 use crate::lsps2::payment_queue::{InterceptedHTLC, PaymentQueue};
 use crate::lsps2::utils::{
 	compute_opening_fee, is_expired_opening_fee_params, is_valid_opening_fee_params,
 };
 use crate::message_queue::MessageQueue;
+use crate::prelude::hash_map::Entry;
 use crate::prelude::{new_hash_map, HashMap, String, ToString, Vec};
 use crate::sync::{Arc, Mutex, MutexGuard, RwLock};
 

@@ -47,6 +50,7 @@ use crate::lsps2::msgs::{
 
 const MAX_PENDING_REQUESTS_PER_PEER: usize = 10;
 const MAX_TOTAL_PENDING_REQUESTS: usize = 1000;
+const MAX_TOTAL_PEERS: usize = 100000;
 
 /// Server-side configuration options for JIT channels.
 #[derive(Clone, Debug)]
@@ -511,6 +515,40 @@ impl PeerState {
 	}
 }
 
+macro_rules! get_or_insert_peer_state_entry {
+	($self: ident, $outer_state_lock: expr, $counterparty_node_id: expr) => {{
+		// Return an internal error and abort if we hit the maximum allowed number of total peers.
+		let is_limited_by_max_total_peers = $outer_state_lock.len() >= MAX_TOTAL_PEERS;
+		match $outer_state_lock.entry(*$counterparty_node_id) {
+			Entry::Vacant(e) => {
+				if is_limited_by_max_total_peers {
+					let error_response = ResponseError {
+						code: JSONRPC_INTERNAL_ERROR_ERROR_CODE,
+						message: JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE.to_string(), data: None,
+					};
+
+					let msg = LSPSMessage::Invalid(error_response);
+					drop($outer_state_lock);
+					$self.pending_messages.enqueue($counterparty_node_id, msg);
+
+					let err = format!(
+						"Dropping request from peer {} due to reaching maximally allowed number of total peers: {}",
+						$counterparty_node_id, MAX_TOTAL_PEERS
+					);
+
+					return Err(LightningError { err, action: ErrorAction::IgnoreAndLog(Level::Error) });
+				} else {
+					e.insert(Mutex::new(PeerState::new()))
+				}
+			}
+			Entry::Occupied(e) => {
+				e.into_mut()
+			}
+		}
+
+	}}
+}
+
 /// The main object allowing to send and receive LSPS2 messages.
 pub struct LSPS2ServiceHandler<CM: Deref + Clone>
 where
@@ -1042,9 +1080,8 @@ where
 	) -> Result<(), LightningError> {
 		let (result, response) = {
 			let mut outer_state_lock = self.per_peer_state.write().unwrap();
-			let inner_state_lock: &mut Mutex<PeerState> = outer_state_lock
-				.entry(*counterparty_node_id)
-				.or_insert(Mutex::new(PeerState::new()));
+			let inner_state_lock =
+				get_or_insert_peer_state_entry!(self, outer_state_lock, counterparty_node_id);
 			let mut peer_state_lock = inner_state_lock.lock().unwrap();
 			let request = LSPS2Request::GetInfo(params.clone());
 			match self.insert_pending_request(
@@ -1161,9 +1198,8 @@ where
 
 		let (result, response) = {
 			let mut outer_state_lock = self.per_peer_state.write().unwrap();
-			let inner_state_lock = outer_state_lock
-				.entry(*counterparty_node_id)
-				.or_insert(Mutex::new(PeerState::new()));
+			let inner_state_lock =
+				get_or_insert_peer_state_entry!(self, outer_state_lock, counterparty_node_id);
 			let mut peer_state_lock = inner_state_lock.lock().unwrap();
 
 			let request = LSPS2Request::Buy(params.clone());
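One detail of the macro worth calling out: the per-peer-state write lock is explicitly dropped before the error message is enqueued. Below is a minimal standalone sketch of that ordering (hypothetical `Service` and `MessageQueue` types, not the ones in `lightning-liquidity`), under the assumption that the message queue should not be called into while the state lock is held:

```rust
use std::collections::{HashMap, VecDeque};
use std::sync::{Mutex, RwLock};

// Hypothetical stand-ins; not the types used by lightning-liquidity.
struct MessageQueue {
	queue: Mutex<VecDeque<String>>,
}

impl MessageQueue {
	fn enqueue(&self, msg: String) {
		self.queue.lock().unwrap().push_back(msg);
	}
}

struct Service {
	// Simplified per-peer map, keyed by a 33-byte node id.
	per_peer_state: RwLock<HashMap<[u8; 33], ()>>,
	pending_messages: MessageQueue,
}

impl Service {
	fn handle_request(&self, peer_id: [u8; 33]) {
		let outer_state_lock = self.per_peer_state.write().unwrap();
		// A new peer would push us over the cap: reject instead of inserting.
		let reject = !outer_state_lock.contains_key(&peer_id) && outer_state_lock.len() >= 100_000;
		if reject {
			// Release the state lock before calling into the message queue,
			// mirroring `drop($outer_state_lock)` in the macro above, so the
			// enqueue never runs while the per-peer-state lock is held.
			drop(outer_state_lock);
			self.pending_messages.enqueue("internal error response".to_string());
			return;
		}
		// ... otherwise handle the request while holding the lock ...
	}
}
```

Releasing the guard first keeps the enqueue call, and anything it may notify, outside the critical section.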
