Correct and update confirmation target constant definitions #3608
Changes from all commits: d18fc72, e027783, ac74d96, ccf8d44, e7c2a61
@@ -40,7 +40,7 @@ use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentPath,
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
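The only functional change in this hunk is the new `MAX_BLOCKS_FOR_CONF` import from `chain::channelmonitor`. As the comment added further down puts it, two of these confirmation windows plus `ANTI_REORG_DELAY` must fit inside `MIN_CLTV_EXPIRY_DELTA`, which is exactly what the reworked sanity check asserts.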
@@ -2824,7 +2824,7 @@ pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
 pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly eight hours of blocks at six blocks/hour.
 ///
 /// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
 ///
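As the doc comment notes, this constant is only a floor; a forwarding node can require a larger delta via `ChannelConfig::cltv_expiry_delta`. A minimal sketch of doing so through the default config, assuming the usual `lightning::util::config` paths and an arbitrary illustrative value of 72 blocks:

```rust
use lightning::util::config::UserConfig;

// Sketch only: build a UserConfig whose channels require a larger CLTV delta than the
// 6*8 = 48-block minimum, giving downstream failures more time to settle on-chain
// before our inbound edge can force-close on us.
fn forwarding_config() -> UserConfig {
    let mut config = UserConfig::default();
    // 72 blocks (~12 hours) is an illustrative value; per the doc comment above,
    // values below MIN_CLTV_EXPIRY_DELTA cannot lower the effective delta.
    config.channel_config.cltv_expiry_delta = 72;
    config
}
```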
@@ -2833,7 +2833,7 @@ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 // i.e. the node we forwarded the payment on to should always have enough room to reliably time out
 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*8;
 
 // This should be long enough to allow a payment path drawn across multiple routing hops with substantial
 // `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
 // in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
@@ -2850,19 +2850,34 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
 // a payment was being routed, so we add an extra block to be safe.
 pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
-// ie that if the next-hop peer fails the HTLC within
-// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
-// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
-// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
-// LATENCY_GRACE_PERIOD_BLOCKS.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get everything on chain and locked
+// in with enough time left to fail the corresponding HTLC back to our inbound edge before they
+// force-close on us.
+// In other words, if the next-hop peer fails HTLC LATENCY_GRACE_PERIOD_BLOCKS after our
+// CLTV_CLAIM_BUFFER (because that's how many blocks we allow them after expiry), we'll still have
Review comment: s/after expiry/before expiry?
Reply: No, after. We FC a channel several blocks after an HTLC expires.
+// 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY left to get two transactions on chain and the second
+// fully locked in before the peer force-closes on us (LATENCY_GRACE_PERIOD_BLOCKS before the
Review comment: suggestion: (proposed rewording of this line)
Reply: That reads kinda confusing to me? Makes it sound like the channel was inbound from the peer, rather than the HTLC was relayed inbound from the peer?
+// expiry, i.e. assuming the peer force-closes right at the expiry and we're behind by
+// LATENCY_GRACE_PERIOD_BLOCKS).
+const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS + 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
+);
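The new check replaces the old dead-code subtraction constant with a compile-time assertion. A self-contained sketch of the same pattern, using illustrative assumed values for the monitor-side constants (the real definitions live in `chain::channelmonitor` and may differ), shows how the 6*8 minimum lines up with the budget:

```rust
// Illustrative values only (assumed for this sketch, not taken from the diff):
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // how far behind the chain tip a node may lag
const ANTI_REORG_DELAY: u32 = 6;            // confirmations before a resolution is reorg-safe
const MAX_BLOCKS_FOR_CONF: u32 = 18;        // worst-case blocks to confirm one of our transactions
const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 8;   // 48 blocks, roughly eight hours

// `const _NAME: () = assert!(..)` fails the build (not a test run) if the inequality breaks.
// With the assumed numbers: 2*3 + 2*18 + 6 = 48, so 6*8 is exactly the smallest delta that
// leaves room for two confirmation windows plus reorg safety plus both grace periods.
const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
    MIN_CLTV_EXPIRY_DELTA as u32
        >= 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
);

fn main() {
    let budget = 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY;
    println!("{} >= {}", MIN_CLTV_EXPIRY_DELTA, budget); // 48 >= 48 with the assumed values
}
```

The old `CHECK_CLTV_EXPIRY_SANITY: u32` constant only failed the build through const-eval underflow of the subtraction; the explicit `assert!` states the inequality directly.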
 
+// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get the HTLC preimage back to our
+// counterparty if the outbound edge gives us the preimage only one block before we'd force-close
+// the channel.
+// ie they provide the preimage LATENCY_GRACE_PERIOD_BLOCKS - 1 after the HTLC expires, then we
+// pass the preimage back, which takes LATENCY_GRACE_PERIOD_BLOCKS to complete, and we want to make
+// sure this all happens at least N blocks before the inbound HTLC expires (where N is the
+// counterparty's CLTV_CLAIM_BUFFER or equivalent).
+const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
 
-// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
-#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const _CHECK_COUNTERPARTY_REALISTIC: () =
+	assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
 
+const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
+	MIN_CLTV_EXPIRY_DELTA as u32 >= 2*LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
+);
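With the same illustrative numbers as the sketch above (LATENCY_GRACE_PERIOD_BLOCKS = 3 and CLTV_CLAIM_BUFFER = 18, both assumed), the two new checks work out to 36 >= 18 for the realism bound and 48 >= 2*3 - 1 + 36 = 41 for the off-chain path, so the 6*6-block assumption about the counterparty's claim buffer still leaves a few blocks of slack under the 6*8 delta.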
 
 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
@@ -15979,15 +15994,15 @@ mod tests {
 	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
 	let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive(msgs::InboundOnionReceivePayload {
 		sender_intended_htlc_amt_msat: 100,
-		cltv_expiry_height: 22,
+		cltv_expiry_height: TEST_FINAL_CLTV,
 		payment_metadata: None,
 		keysend_preimage: None,
 		payment_data: Some(msgs::FinalOnionHopData {
 			payment_secret: PaymentSecret([0; 32]),
 			total_msat: 100,
 		}),
 		custom_tlvs: Vec::new(),
-	}), [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
+	}), [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
 
 	// Should not return an error as this condition:
 	// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
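Presumably the hard-coded heights (22 for the onion's final CLTV and 23 for the incoming HTLC's expiry) only cleared the old, smaller constants; deriving them from TEST_FINAL_CLTV, with the incoming expiry one block above the onion value, keeps the BOLT 4 condition referenced in the test comment satisfied after the constant updates in this PR.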
Review comment: The "HTLC-Success transaction" phrasing seems to suggest this const is only used in the context of channels with inbound HTLC(s) where we have the preimage. But it seems to be used for inbounds where we don't have the preimage as well, and/or other contexts? I wonder if this could be clarified?
Reply: Not directly? It's only used directly as `!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)` and indirectly to calculate `HTLC_FAIL_BACK_BUFFER`, which is the same concept.
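For context, a minimal standalone sketch of the condition quoted in that reply (the struct, field names, and CLTV_CLAIM_BUFFER value here are assumptions for illustration, not the channelmonitor.rs definitions): we only race to claim an HTLC on-chain when it is inbound, expires within the claim buffer, and we already hold the preimage.

```rust
use std::collections::HashMap;

const CLTV_CLAIM_BUFFER: u32 = 18; // assumed value, for illustration only

struct Htlc {
    outbound: bool,
    cltv_expiry: u32,
    payment_hash: [u8; 32],
}

// Mirrors the quoted condition: inbound HTLC, expiring within CLTV_CLAIM_BUFFER blocks
// of the current height, and we hold the corresponding preimage.
fn needs_onchain_claim(
    htlc: &Htlc,
    height: u32,
    payment_preimages: &HashMap<[u8; 32], [u8; 32]>,
) -> bool {
    !htlc.outbound
        && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER
        && payment_preimages.contains_key(&htlc.payment_hash)
}
```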