LSPS2: Limit the total number of peers
While LDK/`ChannelManager` should already introduce an upper bound on
the number of peers, here we ensure that our `PeerState` map can't grow
unboundedly. To this end, we simply return an `Internal error` response
and abort when we would exceed the limit of 100000 peers.
tnull committed Dec 16, 2024
1 parent 7a89521 commit f68c6c5
Showing 2 changed files with 45 additions and 7 deletions.
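The change is a check-then-insert guard around the per-peer state map. The following is a minimal, self-contained sketch of the same pattern, with an illustrative name (`get_or_insert_peer`) and plain `std` types in place of LDK's `PeerState`/`Mutex` machinery:

use std::collections::{hash_map::Entry, HashMap};

const MAX_TOTAL_PEERS: usize = 100_000;

// Illustrative stand-in: map from peer id to some per-peer state.
fn get_or_insert_peer(map: &mut HashMap<u64, u32>, peer_id: u64) -> Result<&mut u32, String> {
	// Capture the size *before* `entry()` takes its mutable borrow of the map.
	let at_capacity = map.len() >= MAX_TOTAL_PEERS;
	match map.entry(peer_id) {
		// Reject only previously unknown peers once the cap is reached.
		Entry::Vacant(_) if at_capacity => {
			Err(format!("dropping request from new peer {}: peer limit reached", peer_id))
		},
		Entry::Vacant(e) => Ok(e.insert(0)),
		// Peers we already track are unaffected by the limit.
		Entry::Occupied(e) => Ok(e.into_mut()),
	}
}

Lookups for known peers always succeed; only the creation of new entries can be refused, which is exactly the behavior of the macro introduced below.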
2 changes: 2 additions & 0 deletions lightning-liquidity/src/lsps0/ser.rs
@@ -40,6 +40,8 @@ pub(crate) const JSONRPC_RESULT_FIELD_KEY: &str = "result";
 pub(crate) const JSONRPC_ERROR_FIELD_KEY: &str = "error";
 pub(crate) const JSONRPC_INVALID_MESSAGE_ERROR_CODE: i32 = -32700;
 pub(crate) const JSONRPC_INVALID_MESSAGE_ERROR_MESSAGE: &str = "parse error";
+pub(crate) const JSONRPC_INTERNAL_ERROR_ERROR_CODE: i32 = -32603;
+pub(crate) const JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE: &str = "Internal error";
 
 pub(crate) const LSPS0_CLIENT_REJECTED_ERROR_CODE: i32 = 1;
50 changes: 43 additions & 7 deletions lightning-liquidity/src/lsps2/service.rs
@@ -11,14 +11,17 @@
 use crate::events::{Event, EventQueue};
 use crate::lsps0::ser::{
-	LSPSMessage, ProtocolMessageHandler, RequestId, ResponseError, LSPS0_CLIENT_REJECTED_ERROR_CODE,
+	LSPSMessage, ProtocolMessageHandler, RequestId, ResponseError,
+	JSONRPC_INTERNAL_ERROR_ERROR_CODE, JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE,
+	LSPS0_CLIENT_REJECTED_ERROR_CODE,
 };
 use crate::lsps2::event::LSPS2ServiceEvent;
 use crate::lsps2::payment_queue::{InterceptedHTLC, PaymentQueue};
 use crate::lsps2::utils::{
 	compute_opening_fee, is_expired_opening_fee_params, is_valid_opening_fee_params,
 };
 use crate::message_queue::MessageQueue;
+use crate::prelude::hash_map::Entry;
 use crate::prelude::{new_hash_map, HashMap, String, ToString, Vec};
 use crate::sync::{Arc, Mutex, MutexGuard, RwLock};

@@ -47,6 +50,7 @@ use crate::lsps2::msgs::{
 
 const MAX_PENDING_REQUESTS_PER_PEER: usize = 10;
 const MAX_TOTAL_PENDING_REQUESTS: usize = 1000;
+const MAX_TOTAL_PEERS: usize = 100000;
 
 /// Server-side configuration options for JIT channels.
 #[derive(Clone, Debug)]
@@ -511,6 +515,40 @@ impl PeerState {
 	}
 }
 
+macro_rules! get_or_insert_peer_state_entry {
+	($self: ident, $outer_state_lock: expr, $counterparty_node_id: expr) => {{
+		// Return an internal error and abort if we hit the maximum allowed number of total peers.
+		let is_limited_by_max_total_peers = $outer_state_lock.len() >= MAX_TOTAL_PEERS;
+		match $outer_state_lock.entry(*$counterparty_node_id) {
+			Entry::Vacant(e) => {
+				if is_limited_by_max_total_peers {
+					let error_response = ResponseError {
+						code: JSONRPC_INTERNAL_ERROR_ERROR_CODE,
+						message: JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE.to_string(),
+						data: None,
+					};
+
+					let msg = LSPSMessage::Invalid(error_response);
+					drop($outer_state_lock);
+					$self.pending_messages.enqueue($counterparty_node_id, msg);
+
+					let err = format!(
+						"Dropping request from peer {} due to reaching the maximum allowed number of total peers: {}",
+						$counterparty_node_id, MAX_TOTAL_PEERS
+					);
+
+					return Err(LightningError { err, action: ErrorAction::IgnoreAndLog(Level::Error) });
+				} else {
+					e.insert(Mutex::new(PeerState::new()))
+				}
+			}
+			Entry::Occupied(e) => {
+				e.into_mut()
+			}
+		}
+	}};
+}
+
 /// The main object allowing to send and receive LSPS2 messages.
 pub struct LSPS2ServiceHandler<CM: Deref + Clone>
 where
@@ -1042,9 +1080,8 @@
 	) -> Result<(), LightningError> {
 		let (result, response) = {
 			let mut outer_state_lock = self.per_peer_state.write().unwrap();
-			let inner_state_lock: &mut Mutex<PeerState> = outer_state_lock
-				.entry(*counterparty_node_id)
-				.or_insert(Mutex::new(PeerState::new()));
+			let inner_state_lock =
+				get_or_insert_peer_state_entry!(self, outer_state_lock, counterparty_node_id);
 			let mut peer_state_lock = inner_state_lock.lock().unwrap();
 			let request = LSPS2Request::GetInfo(params.clone());
 			match self.insert_pending_request(
@@ -1161,9 +1198,8 @@
 
 		let (result, response) = {
 			let mut outer_state_lock = self.per_peer_state.write().unwrap();
-			let inner_state_lock = outer_state_lock
-				.entry(*counterparty_node_id)
-				.or_insert(Mutex::new(PeerState::new()));
+			let inner_state_lock =
+				get_or_insert_peer_state_entry!(self, outer_state_lock, counterparty_node_id);
 			let mut peer_state_lock = inner_state_lock.lock().unwrap();
 
 			let request = LSPS2Request::Buy(params.clone());
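Two details of the macro are easy to miss. First, `is_limited_by_max_total_peers` must be computed before `entry()` is called, since `entry()` holds a mutable borrow of the map for the rest of the match; as a consequence, peers that already have an entry keep being served even at the cap, and only previously unknown peers are turned away. Second, the outer write guard is dropped before the error message is enqueued, presumably to avoid holding the `per_peer_state` lock while the message queue runs.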
