diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c2a49b8ee24..73e4f88f1f3 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -48,7 +48,7 @@ use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, - RecipientOnionFields, Retry, + RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; @@ -82,7 +82,6 @@ use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey} use lightning::io::Cursor; use std::cmp::{self, Ordering}; -use std::collections::VecDeque; use std::mem; use std::sync::atomic; use std::sync::{Arc, Mutex}; @@ -113,22 +112,14 @@ impl FeeEstimator for FuzzEstimator { } } -struct FuzzRouter { - pub next_routes: Mutex>, -} +struct FuzzRouter {} impl Router for FuzzRouter { fn find_route( &self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs, ) -> Result { - if let Some(route) = self.next_routes.lock().unwrap().pop_front() { - return Ok(route); - } - Err(msgs::LightningError { - err: String::from("Not implemented"), - action: msgs::ErrorAction::IgnoreError, - }) + unreachable!() } fn create_blinded_payment_paths( @@ -518,7 +509,7 @@ fn send_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![RouteHop { pubkey: dest.get_our_node_id(), @@ -532,11 +523,10 @@ fn send_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -592,7 +582,7 @@ fn send_hop_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![ RouteHop { @@ -617,11 +607,10 @@ fn send_hop_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -640,7 +629,7 @@ fn send_hop_payment( pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); let broadcast = Arc::new(TestBroadcaster {}); - let router = FuzzRouter { next_routes: Mutex::new(VecDeque::new()) }; + let router = FuzzRouter {}; macro_rules! 
make_node { ($node_id: expr, $fee_estimator: expr) => {{ diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index af7c7ffb003..adcae564f56 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -1099,7 +1099,7 @@ mod tests { SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::Writeable; - use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; + use lightning::util::sweep::{OutputSpendStatus, OutputSweeper, PRUNE_DELAY_BLOCKS}; use lightning::util::test_utils; use lightning::{get_event, get_event_msg}; use lightning_persister::fs_store::FilesystemStore; @@ -2282,8 +2282,8 @@ mod tests { } // Check we stop tracking the spendable outputs when one of the txs reaches - // ANTI_REORG_DELAY confirmations. - confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY); + // PRUNE_DELAY_BLOCKS confirmations. + confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, PRUNE_DELAY_BLOCKS); assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0); if !std::thread::panicking() { diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 07c8342b5ea..17cc41f9502 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -48,6 +48,7 @@ use core::iter::FilterMap; use core::num::ParseIntError; use core::ops::Deref; use core::slice::Iter; +use core::str::FromStr; use core::time::Duration; #[cfg(feature = "serde")] @@ -78,8 +79,12 @@ use crate::prelude::*; /// Re-export serialization traits #[cfg(fuzzing)] pub use crate::de::FromBase32; +#[cfg(not(fuzzing))] +use crate::de::FromBase32; #[cfg(fuzzing)] pub use crate::ser::Base32Iterable; +#[cfg(not(fuzzing))] +use crate::ser::Base32Iterable; /// Errors that indicate what is wrong with the invoice. They have some granularity for debug /// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user. @@ -1086,9 +1091,6 @@ impl RawBolt11Invoice { /// Calculate the hash of the encoded `RawBolt11Invoice` which should be signed. pub fn signable_hash(&self) -> [u8; 32] { - #[cfg(not(fuzzing))] - use crate::ser::Base32Iterable; - Self::hash_from_parts(self.hrp.to_string().as_bytes(), self.data.fe_iter()) } @@ -1189,6 +1191,21 @@ impl RawBolt11Invoice { pub fn currency(&self) -> Currency { self.hrp.currency.clone() } + + /// Convert to HRP prefix and Fe32 encoded data part. + /// Can be used to transmit unsigned invoices for remote signing. + pub fn to_raw(&self) -> (String, Vec) { + (self.hrp.to_string(), self.data.fe_iter().collect()) + } + + /// Convert from HRP prefix and Fe32 encoded data part. + /// Can be used to receive unsigned invoices for remote signing. + pub fn from_raw(hrp: &str, data: &[Fe32]) -> Result { + let raw_hrp: RawHrp = RawHrp::from_str(hrp)?; + let data_part = RawDataPart::from_base32(data)?; + + Ok(Self { hrp: raw_hrp, data: data_part }) + } } impl PositiveTimestamp { diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 84281df1d7b..b9c7e88420d 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -176,7 +176,7 @@ pub trait FeeEstimator { } /// Minimum relay fee as required by bitcoin network mempool policy. -pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000; +pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding. 
/// See the following Core Lightning commit for an explanation: /// diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 3f6bdc3f256..8788fbb894d 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not // keep bumping another claim tx to solve the outpoint. pub const ANTI_REORG_DELAY: u32 = 6; +/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and +/// considering it be safely archived. +// 4032 blocks are roughly four weeks +pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032; /// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we /// refuse to accept a new HTLC. /// @@ -1023,6 +1027,12 @@ pub(crate) struct ChannelMonitorImpl { /// The first block height at which we had no remaining claimable balances. balances_empty_height: Option, + + /// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to + /// a downstream channel force-close remaining unconfirmed by the time the upstream timeout + /// expires. This is used to tell us we already generated an event to fail this HTLC back + /// during a previous block scan. + failed_back_htlc_ids: HashSet, } /// Transaction outputs to watch for on-chain spends. @@ -1445,6 +1455,8 @@ impl ChannelMonitor { counterparty_node_id: Some(counterparty_node_id), initial_counterparty_commitment_info: None, balances_empty_height: None, + + failed_back_htlc_ids: new_hash_set(), }) } @@ -2015,10 +2027,11 @@ impl ChannelMonitor { /// /// This function returns a tuple of two booleans, the first indicating whether the monitor is /// fully resolved, and the second whether the monitor needs persistence to ensure it is - /// reliably marked as resolved within 4032 blocks. + /// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks. /// - /// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least - /// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets. + /// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at + /// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs + /// resulting in spuriously empty balance sets. pub fn check_and_update_full_resolution_status(&self, logger: &L) -> (bool, bool) { let mut is_all_funds_claimed = self.get_claimable_balances().is_empty(); let current_height = self.current_best_block().height; @@ -2034,11 +2047,10 @@ impl ChannelMonitor { // once processed, implies the preimage exists in the corresponding inbound channel. let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty(); - const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) { (Some(balances_empty_height), true, true) => { // Claimed all funds, check if reached the blocks threshold. - (current_height >= balances_empty_height + BLOCKS_THRESHOLD, false) + (current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false) }, (Some(_), false, _)|(Some(_), _, false) => { // previously assumed we claimed all funds, but we have new funds to claim or @@ -2058,7 +2070,7 @@ impl ChannelMonitor { // None. It is set to the current block height. 
log_debug!(logger, "ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks", - inner.get_funding_txo().0, BLOCKS_THRESHOLD); + inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS); inner.balances_empty_height = Some(current_height); (false, true) }, @@ -3274,7 +3286,7 @@ impl ChannelMonitorImpl { } } - if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update { + if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) } else { ret } @@ -4221,6 +4233,71 @@ impl ChannelMonitorImpl { } } + if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed { + // Fail back HTLCs on backwards channels if they expire within + // `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a + // point where no further off-chain updates will be accepted). If we haven't seen the + // preimage for an HTLC by the time the previous hop's timeout expires, we've lost that + // HTLC, so we might as well fail it back instead of having our counterparty force-close + // the inbound channel. + let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter() + .map(|&(ref a, _, ref b)| (a, b.as_ref())); + + let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let htlcs = current_holder_htlcs + .chain(current_counterparty_htlcs) + .chain(prev_counterparty_htlcs); + + let height = self.best_block.height; + for (htlc, source_opt) in htlcs { + // Only check forwarded HTLCs' previous hops + let source = match source_opt { + Some(source) => source, + None => continue, + }; + let inbound_htlc_expiry = match source.inbound_htlc_expiry() { + Some(cltv_expiry) => cltv_expiry, + None => continue, + }; + let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS); + if inbound_htlc_expiry > max_expiry_height { + continue; + } + let duplicate_event = self.pending_monitor_events.iter().any( + |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { + upd.source == *source + } else { false }); + if duplicate_event { + continue; + } + if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) { + continue; + } + if !duplicate_event { + log_error!(logger, "Failing back HTLC {} upstream to preserve the \ + channel as the forward HTLC hasn't resolved and our backward HTLC \ + expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry); + self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { + source: source.clone(), + payment_preimage: None, + payment_hash: htlc.payment_hash, + htlc_value_satoshis: Some(htlc.amount_msat / 1000), + })); + } + } + } + let conf_target = self.closure_conf_target(); 
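// A condensed sketch of the fail-back test implemented in the loop above, with
// the HTLC-source plumbing stripped away (the free-standing function and its
// parameter names are hypothetical):
fn should_fail_back_now(inbound_htlc_expiry: u32, best_block_height: u32) -> bool {
    // LATENCY_GRACE_PERIOD_BLOCKS (3 in this file) of headroom: once the
    // upstream HTLC's CLTV expiry is this close, we can no longer count on
    // resolving the downstream claim off-chain, so we fail backwards instead
    // of letting the upstream peer force-close on us.
    inbound_htlc_expiry <= best_block_height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS)
}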
self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); @@ -5066,6 +5143,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP counterparty_node_id, initial_counterparty_commitment_info, balances_empty_height, + failed_back_htlc_ids: new_hash_set(), }))) } } @@ -5092,7 +5170,7 @@ mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; use super::ChannelMonitorUpdateStep; - use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err}; + use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; use crate::chain::{BestBlock, Confirm}; use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor}; use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; @@ -5102,10 +5180,9 @@ mod tests { use crate::types::payment::{PaymentPreimage, PaymentHash}; use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey}; use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; - use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::script::ShutdownScript; - use crate::util::errors::APIError; use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::logger::Logger; @@ -5166,9 +5243,9 @@ mod tests { // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass // the update through to the ChannelMonitor which will refuse it (as the channel is closed). let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) + ).unwrap(); check_added_monitors!(nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 759668cfa9c..2a43b006920 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -215,6 +215,7 @@ pub(crate) enum OnchainClaim { } /// Represents the different feerate strategies a pending request can use when generating a claim. +#[derive(Debug)] pub(crate) enum FeerateStrategy { /// We must reuse the most recently used feerate, if any. 
RetryPrevious, diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 53bba3a754b..55214006d4c 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1117,10 +1117,10 @@ impl PackageTemplate { // If old feerate is 0, first iteration of this claim, use normal fee calculation if self.feerate_previous != 0 { if let Some((new_fee, feerate)) = feerate_bump( - predicted_weight, input_amounts, self.feerate_previous, feerate_strategy, - conf_target, fee_estimator, logger, + predicted_weight, input_amounts, dust_limit_sats, self.feerate_previous, + feerate_strategy, conf_target, fee_estimator, logger, ) { - return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); + return Some((input_amounts.saturating_sub(new_fee), feerate)); } } else { if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { @@ -1270,16 +1270,20 @@ fn compute_fee_from_spent_amounts( /// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy /// requirement. fn feerate_bump( - predicted_weight: u64, input_amounts: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, - conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + predicted_weight: u64, input_amounts: u64, dust_limit_sats: u64, previous_feerate: u64, + feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u64, u64)> where F::Target: FeeEstimator, { + let previous_fee = previous_feerate * predicted_weight / 1000; + // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { + log_debug!(logger, "Initiating fee rate bump from {} s/kWU ({} s) to {} s/kWU ({} s) using {:?} strategy", previous_feerate, previous_fee, new_feerate, new_fee, feerate_strategy); match feerate_strategy { FeerateStrategy::RetryPrevious => { let previous_fee = previous_feerate * predicted_weight / 1000; @@ -1297,15 +1301,12 @@ where // ...else just increase the previous feerate by 25% (because that's a nice number) let bumped_feerate = previous_feerate + (previous_feerate / 4); let bumped_fee = bumped_feerate * predicted_weight / 1000; - if input_amounts <= bumped_fee { - log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts); - return None; - } + (bumped_fee, bumped_feerate) }, } } else { - log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts); + log_warn!(logger, "Can't bump new claiming tx, input amount {} is too small", input_amounts); return None; }; @@ -1316,22 +1317,31 @@ where return Some((new_fee, new_feerate)); } - let previous_fee = previous_feerate * predicted_weight / 1000; let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * predicted_weight / 1000; // BIP 125 Opt-in Full Replace-by-Fee Signaling // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions. // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting. 
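// Worked example of the replacement-fee arithmetic below, using the values from
// the new test_feerate_bump case (weight 1000 WU, previous feerate 253 s/kWU,
// estimator pinned at the floor, ForceBump strategy):
//   previous_fee = 253 * 1000 / 1000              = 253 sats
//   naive 25% bump: 253 + 253 / 4 = 316 s/kWU     -> naive_new_fee = 316 sats
//   BIP125 rules 3 + 4: new_fee >= previous_fee + min_relay_fee = 253 + 253 = 506 sats
//   => new_fee = max(316, 506) = 506 sats, new_feerate = 506 s/kWU
// With input_amounts = 1052 sats the claim keeps 1052 - 506 = 546 sats, exactly
// the dust limit, so the bump succeeds; at 1051 sats it would leave 545 < 546
// and the function now returns None instead of producing a dust output.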
- let new_fee = if new_fee < previous_fee + min_relay_fee { - new_fee + previous_fee + min_relay_fee - new_fee - } else { - new_fee - }; - Some((new_fee, new_fee * 1000 / predicted_weight)) + let naive_new_fee = new_fee; + let new_fee = cmp::max(new_fee, previous_fee + min_relay_fee); + + if new_fee > naive_new_fee { + log_debug!(logger, "Naive fee bump of {}s does not meet min relay fee requirements of {}s", naive_new_fee - previous_fee, min_relay_fee); + } + + let remaining_output_amount = input_amounts.saturating_sub(new_fee); + if remaining_output_amount < dust_limit_sats { + log_warn!(logger, "Can't bump new claiming tx, output amount {} would end up below dust threshold {}", remaining_output_amount, dust_limit_sats); + return None; + } + + let new_feerate = new_fee * 1000 / predicted_weight; + log_debug!(logger, "Fee rate bumped by {}s from {} s/KWU ({} s) to {} s/KWU ({} s)", new_fee - previous_fee, previous_feerate, previous_fee, new_feerate, new_fee); + Some((new_fee, new_feerate)) } #[cfg(test)] mod tests { - use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc}; + use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc, feerate_bump}; use crate::chain::Txid; use crate::ln::chan_utils::HTLCOutputInCommitment; use crate::types::payment::{PaymentPreimage, PaymentHash}; @@ -1349,7 +1359,10 @@ mod tests { use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::Secp256k1; + use crate::chain::chaininterface::{ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW, LowerBoundedFeeEstimator}; + use crate::chain::onchaintx::FeerateStrategy; use crate::types::features::ChannelTypeFeatures; + use crate::util::test_utils::TestLogger; fn fake_txid(n: u64) -> Txid { Transaction { @@ -1659,4 +1672,84 @@ mod tests { } } } + + struct TestFeeEstimator { + sat_per_kw: u32, + } + + impl FeeEstimator for TestFeeEstimator { + fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { + self.sat_per_kw + } + } + + #[test] + fn test_feerate_bump() { + let sat_per_kw = FEERATE_FLOOR_SATS_PER_KW; + let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; + let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); + let fee_rate_strategy = FeerateStrategy::ForceBump; + let confirmation_target = ConfirmationTarget::UrgentOnChainSweep; + + { + // Check underflow doesn't occur + let predicted_weight_units = 1000; + let input_satoshis = 505; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, input amount 505 is too small").unwrap(), 1); + } + + { + // Check post-25%-bump-underflow scenario satisfying the following constraints: + // input - fee = 546 + // input - fee * 1.25 = -1 + + // We accomplish that scenario with the following values: + // input = 2734 + // fee = 2188 + + let predicted_weight_units = 1000; + let input_satoshis = 2734; + + let logger = TestLogger::new(); + 
let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 2188, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that an output amount of 0 is caught + let predicted_weight_units = 1000; + let input_satoshis = 506; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that dust_threshold - 1 is blocked + let predicted_weight_units = 1000; + let input_satoshis = 1051; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 545 would end up below dust threshold 546").unwrap(), 1); + } + + { + let predicted_weight_units = 1000; + let input_satoshis = 1052; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger).unwrap(); + assert_eq!(bumped_fee_rate, (506, 506)); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Naive fee bump of 63s does not meet min relay fee requirements of 253s").unwrap(), 1); + } + } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index fcc1f8f5a64..2d01ece1158 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -19,13 +19,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; -use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::{AnnouncementSigsState, ChannelPhase}; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use crate::util::test_channel_signer::TestChannelSigner; -use crate::util::errors::APIError; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::TestBroadcaster; @@ -133,9 +132,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), 
PaymentId(payment_hash_1.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -190,9 +189,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -257,9 +256,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -2004,16 +2003,10 @@ fn test_path_paused_mpp() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - // Now check that we get the right return value, indicating that the first path succeeded but - // the second got a MonitorUpdateInProgress err. This implies - // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry. - if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route( + // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. + nodes[0].node.send_payment_with_route( route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ) { - assert_eq!(results.len(), 2); - if let Ok(()) = results[0] {} else { panic!(); } - if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); } - } else { panic!(); } + ).unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5e3d72a6241..c15a4bee643 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1453,15 +1453,6 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// [`msgs::RevokeAndACK`] message from the counterparty. sent_message_awaiting_response: Option, - #[cfg(any(test, fuzzing))] - // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the - // corresponding HTLC on the inbound path. If, then, the outbound path channel is - // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack - // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. 
This - // is fine, but as a sanity check in our failure to generate the second claim, we check here - // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC. - historical_inbound_htlc_fulfills: HashSet, - /// This channel's type, as negotiated during channel open channel_type: ChannelTypeFeatures, @@ -2210,9 +2201,6 @@ impl ChannelContext where SP::Target: SignerProvider { funding_tx_broadcast_safe_event_emitted: false, channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: new_hash_set(), - channel_type, channel_keys_id, @@ -2443,9 +2431,6 @@ impl ChannelContext where SP::Target: SignerProvider { funding_tx_broadcast_safe_event_emitted: false, channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: new_hash_set(), - channel_type, channel_keys_id, @@ -4472,10 +4457,6 @@ impl Channel where } } if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and - // this is simply a duplicate claim, not previously failed and we lost funds. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } @@ -4505,8 +4486,6 @@ impl Channel where if htlc_id_arg == htlc_id { // Make sure we don't leave latest_monitor_update_id incremented here: self.context.latest_monitor_update_id -= 1; - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } }, @@ -4528,12 +4507,8 @@ impl Channel where self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; @@ -4598,14 +4573,8 @@ impl Channel where } } - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. + /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. + /// if it was already resolved). Otherwise returns `Ok`. pub fn queue_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<(), ChannelError> where L::Target: Logger { self.fail_htlc(htlc_id_arg, err_packet, true, logger) @@ -4623,14 +4592,8 @@ impl Channel where .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. 
We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. + /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. + /// if it was already resolved). Otherwise returns `Ok`. fn fail_htlc( &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool, logger: &L @@ -4648,12 +4611,8 @@ impl Channel where if htlc.htlc_id == htlc_id_arg { match htlc.state { InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - } - return Ok(None); + InboundHTLCState::LocalRemoved(_) => { + return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, _ => { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); @@ -4664,11 +4623,7 @@ impl Channel where } } if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this - // is simply a duplicate fail, not previously failed and we failed-back too early. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc_id_arg))); } if !self.context.channel_state.can_generate_new_commitment() { @@ -4682,17 +4637,14 @@ impl Channel where match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("HTLC {} was already claimed!", htlc_id))); } }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); + return Err(ChannelError::Ignore(format!("HTLC {} was already pending failure", htlc_id))); } }, _ => {} @@ -9543,13 +9495,6 @@ impl Writeable for Channel where SP::Target: SignerProvider { self.context.channel_update_status.write(writer)?; - #[cfg(any(test, fuzzing))] - (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?; - #[cfg(any(test, fuzzing))] - for htlc in self.context.historical_inbound_htlc_fulfills.iter() { - htlc.write(writer)?; - } - // If the channel type is something other than only-static-remote-key, then we need to have // older clients fail to deserialize this channel at all. 
If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type @@ -9883,16 +9828,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let channel_update_status = Readable::read(reader)?; - #[cfg(any(test, fuzzing))] - let mut historical_inbound_htlc_fulfills = new_hash_set(); - #[cfg(any(test, fuzzing))] - { - let htlc_fulfills_len: u64 = Readable::read(reader)?; - for _ in 0..htlc_fulfills_len { - assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?)); - } - } - let pending_update_fee = if let Some(feerate) = pending_update_fee_value { Some((feerate, if channel_parameters.is_outbound_from_holder { FeeUpdateState::Outbound @@ -10233,9 +10168,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills, - channel_type: channel_type.unwrap(), channel_keys_id, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a5ae07eab7f..6c8f3139c6a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -55,9 +55,7 @@ use crate::ln::channel_state::ChannelDetails; use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::types::features::Bolt11InvoiceFeatures; -use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, RouteParameters, Router}; -#[cfg(test)] -use crate::routing::router::Route; +use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; @@ -166,6 +164,8 @@ pub enum PendingHTLCRouting { short_channel_id: u64, // This should be NonZero eventually when we bump MSRV /// Set if this HTLC is being forwarded within a blinded path. blinded: Option, + /// The absolute CLTV of the inbound HTLC + incoming_cltv_expiry: Option, }, /// The onion indicates that this is a payment for an invoice (supposedly) generated by us. /// @@ -266,6 +266,14 @@ impl PendingHTLCRouting { _ => None, } } + + fn incoming_cltv_expiry(&self) -> Option { + match self { + Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry, + Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), + Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), + } + } } /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it @@ -386,6 +394,9 @@ pub(crate) struct HTLCPreviousHopData { // channel with a preimage provided by the forward channel. outpoint: OutPoint, counterparty_node_id: Option, + /// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward + /// channel remains unconfirmed for too long. + cltv_expiry: Option, } #[derive(PartialEq, Eq)] @@ -688,6 +699,15 @@ impl HTLCSource { true } } + + /// Returns the CLTV expiry of the inbound HTLC (i.e. 
the source referred to by this object), + /// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later. + pub(crate) fn inbound_htlc_expiry(&self) -> Option { + match self { + Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry, + _ => None, + } + } } /// This enum is used to specify which error data to send to peers when failing back an HTLC @@ -2397,9 +2417,6 @@ where fee_estimator: LowerBoundedFeeEstimator, chain_monitor: M, tx_broadcaster: T, - #[cfg(fuzzing)] - pub router: R, - #[cfg(not(fuzzing))] router: R, message_router: MR, @@ -4622,21 +4639,31 @@ where } } - // Deprecated send method, for testing use [`Self::send_payment`] and - // [`TestRouter::expect_find_route`] instead. - // - // [`TestRouter::expect_find_route`]: crate::util::test_utils::TestRouter::expect_find_route - #[cfg(test)] - pub(crate) fn send_payment_with_route( - &self, route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, + /// Sends a payment along a given route. See [`Self::send_payment`] for more info. + /// + /// LDK will not automatically retry this payment, though it may be manually re-sent after an + /// [`Event::PaymentFailed`] is generated. + pub fn send_payment_with_route( + &self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId - ) -> Result<(), PaymentSendFailure> { + ) -> Result<(), RetryableSendFailure> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let route_params = route.route_params.clone().unwrap_or_else(|| { + // Create a dummy route params since they're a required parameter but unused in this case + let (payee_node_id, cltv_delta) = route.paths.first() + .and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32))) + .unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32)); + let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta); + RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount()) + }); + if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } + let router = FixedRouter::new(route); self.pending_outbound_payments - .send_payment_with_route(&route, payment_hash, recipient_onion, payment_id, - &self.entropy_source, &self.node_signer, best_block_height, - |args| self.send_payment_along_path(args)) + .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), + route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + &self.pending_events, |args| self.send_payment_along_path(args)) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -4665,7 +4692,8 @@ where /// [`ChannelManager::list_recent_payments`] for more information. /// /// Routes are automatically found using the [`Router] provided on startup. To fix a route for a - /// particular payment, match the [`PaymentId`] passed to [`Router::find_route_with_id`]. + /// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to + /// [`Router::find_route_with_id`]. 
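// A caller-side sketch of the now-public `send_payment_with_route` (variable
// names such as `channel_manager`, `dest_node_id` and `dest_chan_id` are
// hypothetical; the field values mirror the fuzz target earlier in this patch):
let route = Route {
    paths: vec![Path {
        hops: vec![RouteHop {
            pubkey: dest_node_id,
            node_features: NodeFeatures::empty(),
            short_channel_id: dest_chan_id,
            channel_features: ChannelFeatures::empty(),
            fee_msat: amt_msat,
            cltv_expiry_delta: 200,
            maybe_announced_channel: true,
        }],
        blinded_tail: None,
    }],
    // If absent, send_payment_with_route synthesizes dummy RouteParameters
    // from the final hop, as implemented above.
    route_params: None,
};
let onion = RecipientOnionFields::secret_only(payment_secret);
channel_manager.send_payment_with_route(route, payment_hash, onion, PaymentId(payment_hash.0))?;
// No automatic retries are attempted; on failure an Event::PaymentFailed is
// surfaced and the payment may be manually re-sent.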
/// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed @@ -5535,9 +5563,9 @@ where })?; let routing = match payment.forward_info.routing { - PendingHTLCRouting::Forward { onion_packet, blinded, .. } => { + PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => { PendingHTLCRouting::Forward { - onion_packet, blinded, short_channel_id: next_hop_scid + onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid, } }, _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted @@ -5576,7 +5604,7 @@ where err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) })?; - if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing { + if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing { let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: payment.prev_short_channel_id, user_channel_id: Some(payment.prev_user_channel_id), @@ -5587,6 +5615,7 @@ where incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: payment.forward_info.routing.blinded_failure(), + cltv_expiry: incoming_cltv_expiry, }); let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); @@ -5761,6 +5790,7 @@ where outgoing_cltv_value, .. } }) => { + let cltv_expiry = routing.incoming_cltv_expiry(); macro_rules! failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); @@ -5776,6 +5806,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: $phantom_ss, blinded_failure: routing.blinded_failure(), + cltv_expiry, }); let reason = if $next_hop_unknown { @@ -5885,7 +5916,7 @@ where prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { - ref onion_packet, blinded, .. + ref onion_packet, blinded, incoming_cltv_expiry, .. }, skimmed_fee_msat, .. }, }) => { @@ -5900,6 +5931,7 @@ where // Phantom payments are only PendingHTLCRouting::Receive. phantom_shared_secret: None, blinded_failure: blinded.map(|b| b.failure), + cltv_expiry: incoming_cltv_expiry, }); let next_blinding_point = blinded.and_then(|b| { b.next_blinding_override.or_else(|| { @@ -6015,7 +6047,6 @@ where // fail-backs are best-effort, we probably already have one // pending, and if not that's OK, if not, the channel is on // the chain and sending the HTLC-Timeout is their problem. - continue; } } } @@ -6074,6 +6105,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete @@ -6107,6 +6139,7 @@ where incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, @@ -8982,6 +9015,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: forward_info.routing.blinded_failure(), + cltv_expiry: forward_info.routing.incoming_cltv_expiry(), }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, @@ -11145,6 +11179,7 @@ where outpoint: htlc.prev_funding_outpoint, channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), + cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(), }); let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { @@ -12372,6 +12407,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (0, onion_packet, required), (1, blinded, option), (2, short_channel_id, required), + (3, incoming_cltv_expiry, option), }, (1, Receive) => { (0, payment_data, required), @@ -12487,6 +12523,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (2, outpoint, required), (3, blinded_failure, option), (4, htlc_id, required), + (5, cltv_expiry, option), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), // Note that by the time we get past the required read for type 2 above, outpoint will be @@ -14363,7 +14400,7 @@ mod tests { use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; - use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId}; + use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, ErrorAction}; use crate::ln::msgs::ChannelMessageHandler; @@ -14789,14 +14826,18 @@ mod tests { route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - match nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)) - .unwrap_err() { - PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { - assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) - }, - _ => panic!("unexpected error") + nodes[0].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError); + } + _ => panic!() } + nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2); + assert!(nodes[0].node.list_recent_payments().is_empty()); } #[test] diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index b4f172b4a27..63341969326 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1064,30 +1064,27 @@ macro_rules! get_local_commitment_txn { /// Check the error from attempting a payment. #[macro_export] macro_rules! 
unwrap_send_err { - ($res: expr, $all_failed: expr, $type: pat, $check: expr) => { - match &$res { - &Err(PaymentSendFailure::AllFailedResendSafe(ref fails)) if $all_failed => { - assert_eq!(fails.len(), 1); - match fails[0] { - $type => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PartialFailure { ref results, .. }) if !$all_failed => { - assert_eq!(results.len(), 1); - match results[0] { - Err($type) => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PathParameterError(ref result)) if !$all_failed => { - assert_eq!(result.len(), 1); - match result[0] { - Err($type) => { $check }, - _ => panic!(), + ($node: expr, $res: expr, $all_failed: expr, $type: pat, $check: expr) => { + assert!($res.is_ok()); + let events = $node.node.get_and_clear_pending_events(); + assert!(events.len() == 2); + match &events[0] { + crate::events::Event::PaymentPathFailed { failure, .. } => { + match failure { + crate::events::PathFailure::InitialSend { err } => { + match err { + $type => { $check }, + _ => panic!() + } + }, + _ => panic!() } }, - _ => {panic!()}, + _ => panic!() + } + match &events[1] { + crate::events::Event::PaymentFailed { .. } => {}, + _ => panic!() } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e52870cf19d..b29ee99e077 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -23,7 +23,7 @@ use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvi use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; -use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; @@ -1187,7 +1187,7 @@ fn holding_cell_htlc_counting() { // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. { - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash_1, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1313,18 +1313,22 @@ fn test_duplicate_htlc_different_direction_onchain() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let payment_value_sats = 582; + let payment_value_msats = payment_value_sats * 1000; + // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], payment_value_msats, payment_hash, node_a_payment_secret); // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[0], payment_hash, 800_000); + expect_payment_claimed!(nodes[0], payment_hash, payment_value_msats); check_added_monitors!(nodes[0], 1); // Broadcast node 1 commitment txn @@ -1333,7 +1337,7 @@ fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound let mut has_both_htlcs = 0; // check htlcs match ones committed for outp in remote_txn[0].output.iter() { - if outp.value.to_sat() == 800_000 / 1000 { + if outp.value.to_sat() == payment_value_sats { has_both_htlcs += 1; } else if outp.value.to_sat() == 900_000 / 1000 { has_both_htlcs += 1; @@ -1353,18 +1357,15 @@ fn test_duplicate_htlc_different_direction_onchain() { check_spends!(claim_txn[1], remote_txn[0]); check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output { - (&claim_txn[1], &claim_txn[2]) - } else { - (&claim_txn[2], &claim_txn[1]) - }; + let timeout_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output).unwrap(); + let preimage_bump_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output).unwrap(); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_bump_tx.input.len(), 1); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800); + assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), payment_value_sats); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx @@ -1410,14 +1411,8 @@ fn test_basic_channel_reserve() { get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); - match err { - PaymentSendFailure::AllFailedResendSafe(ref fails) => { - if let &APIError::ChannelUnavailable { .. } = &fails[0] {} - else { panic!("Unexpected error variant"); } - }, - _ => panic!("Unexpected error variant"), - } + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. 
}, {} ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); @@ -1603,7 +1598,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { } // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1703,7 +1698,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } @@ -1915,7 +1910,7 @@ fn test_channel_reserve_holding_cell_htlcs() { route.paths[0].hops.last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1987,7 +1982,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2017,7 +2012,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2282,6 +2277,138 @@ fn channel_reserve_in_flight_removes() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} + +#[test] +fn test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} + +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just before the upstream timeout expires + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + + // Force close the B<->C channel by timing out the HTLC + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + + // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid + // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + + // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which + // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). + let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; + connect_blocks(&nodes[1], upstream_timeout_blocks); + + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + check_added_monitors!(nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; + + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, + PaymentFailedConditions::new().blamed_chan_closed(true)); + + // Make sure we handle possible duplicate fails or extra messages after failing back + match post_fail_back_action { + PostFailBackAction::TimeoutOnChain => { + // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again + mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + // Expect handling another fail back event, but the HTLC is already gone + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOnChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + + connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); + check_closed_broadcast!(nodes[2], true); + check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000); + check_added_monitors!(nodes[2], 1); + + mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::FailOffChain => { + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], + vec![HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fail = commitment_update.update_fail_htlcs[0].clone(); + + nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOffChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); + + nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + }; +} + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -2386,7 +2513,7 @@ fn channel_monitor_network_test() { let node2_commitment_txid; { let node_txn = test_txn_broadcast(&nodes[2], &chan_3, 
None, HTLCType::NONE); - connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); + connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); node2_commitment_txid = node_txn[0].compute_txid(); @@ -3324,8 +3451,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false - , [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[2].node.get_our_node_id()], 100000); let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal| if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal { Some(*claimable_height) @@ -6490,10 +6617,11 @@ fn test_payment_route_reaching_same_channel_twice() { let cloned_hops = route.paths[0].hops.clone(); route.paths[0].hops.extend_from_slice(&cloned_hops); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), false, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Path went through the same channel twice")); + assert!(nodes[0].node.list_recent_payments().is_empty()); } // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. @@ -6512,7 +6640,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 100; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -6529,13 +6657,13 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send 0-msat HTLC")); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); } #[test] @@ -6576,7 +6704,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); @@ -6620,7 +6748,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); @@ -6644,7 +6772,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { // Manually create a route over our max in flight (which our router normally automatically // limits us to. route.paths[0].hops[0].fee_msat = max_in_flight + 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -7935,22 +8063,31 @@ fn test_bump_penalty_txn_on_remote_commitment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 3_000_000); - route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; - - // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC - let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); - assert_eq!(remote_txn[0].output.len(), 4); - assert_eq!(remote_txn[0].input.len(), 1); - assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); - - // Claim a HTLC without revocation (provide B monitor with preimage) - nodes[1].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); - mine_transaction(&nodes[1], &remote_txn[0]); - check_added_monitors!(nodes[1], 2); - connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + let remote_txn = { + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let htlc_value_a_msats = 582_000; + let htlc_value_b_msats = 583_000; + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); + route_payment(&nodes[1], &vec!(&nodes[0])[..], htlc_value_b_msats); + + // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC + let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); + assert_eq!(remote_txn[0].output.len(), 4); + assert_eq!(remote_txn[0].input.len(), 1); + assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); + + // Claim a HTLC without revocation (provide B monitor with preimage) + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats); + mine_transaction(&nodes[1], &remote_txn[0]); + check_added_monitors!(nodes[1], 2); + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // depending on the block connection style, node 1 may have broadcast either 3 or 10 txs + + remote_txn + }; // One or more claim tx should have been broadcast, check it let timeout; @@ -9787,6 +9924,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); @@ -9802,7 +9941,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let conf_height = nodes[1].best_block_info().1; if !test_height_before_timelock { - connect_blocks(&nodes[1], 24 * 6); + connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); } nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); @@ -9821,10 +9960,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t &spending_txn[0] }; check_spends!(htlc_tx, node_txn[0]); - // We should also generate a SpendableOutputs event with the to_self output (as its - // timelock is up). 
- let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); - assert_eq!(descriptor_spend_txn.len(), 1); // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we // should immediately fail-backwards the HTLC to the previous hop, without waiting for an @@ -9843,6 +9978,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true); + + // We should also generate a SpendableOutputs event with the to_self output (once the + // timelock is up). + connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1); + let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); + assert_eq!(descriptor_spend_txn.len(), 1); + + // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to + // avoid the A<->B channel closing (even though it already has). This will generate a + // spurious HTLCHandlingFailed event. + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]); + } } @@ -10294,11 +10441,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } else { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 9556e988b4e..e2c76643348 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -10,7 +10,7 @@ //! Further functional tests which test blockchain reorganizations. use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; @@ -246,19 +246,19 @@ fn archive_fully_resolved_monitors() { // At this point, both nodes have no more `Balance`s, but nodes[0]'s `ChannelMonitor` still // hasn't had the `MonitorEvent` that contains the preimage claimed by the `ChannelManager`.
- // Thus, calling `archive_fully_resolved_channel_monitors` and waiting 4032 blocks will not - // result in the `ChannelMonitor` being archived. + // Thus, calling `archive_fully_resolved_channel_monitors` and waiting `ARCHIVAL_DELAY_BLOCKS` + // blocks will not result in the `ChannelMonitor` being archived. nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); - connect_blocks(&nodes[0], 4032); + connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS); nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); - // ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly 4032 - // blocks. + // ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly + // `ARCHIVAL_DELAY_BLOCKS` blocks. nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1); - connect_blocks(&nodes[1], 4031); + connect_blocks(&nodes[1], ARCHIVAL_DELAY_BLOCKS - 1); nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1); connect_blocks(&nodes[1], 1); @@ -266,11 +266,11 @@ fn archive_fully_resolved_monitors() { assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 0); // Finally, we process the pending `MonitorEvent` from nodes[0], allowing the `ChannelMonitor` - // to be archived 4032 blocks later. + // to be archived `ARCHIVAL_DELAY_BLOCKS` blocks later. expect_payment_sent(&nodes[0], payment_preimage, None, true, false); nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); - connect_blocks(&nodes[0], 4031); + connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS - 1); nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); connect_blocks(&nodes[0], 1); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 193cdd1582a..f9d4f371227 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -110,6 +110,7 @@ pub(super) fn create_fwd_pending_htlc_info( routing: PendingHTLCRouting::Forward { onion_packet: outgoing_packet, short_channel_id, + incoming_cltv_expiry: Some(msg.cltv_expiry), blinded: intro_node_blinding_point.or(msg.blinding_point) .map(|bp| BlindedForward { inbound_blinding_point: bp, diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index c0dea7df52d..7b579b7a261 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -790,23 +790,6 @@ impl OutboundPayments { best_block_height, logger, pending_events, &send_payment_along_path) } - #[cfg(test)] - pub(super) fn send_payment_with_route( - &self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, - payment_id: PaymentId, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - send_payment_along_path: F - ) -> Result<(), PaymentSendFailure> - where - ES::Target: EntropySource, - NS::Target: NodeSigner, - F: Fn(SendAlongPathArgs) -> Result<(), APIError> - { - let onion_session_privs = self.add_new_pending_payment(payment_hash, 
recipient_onion.clone(), payment_id, None, route, None, None, entropy_source, best_block_height)?; - self.pay_route_internal(route, payment_hash, &recipient_onion, None, None, payment_id, None, - onion_session_privs, node_signer, best_block_height, &send_payment_along_path) - .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) - } - pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, @@ -983,7 +966,7 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, payment_id, - Some(route_params.final_value_msat), onion_session_privs, node_signer, best_block_height, + Some(route_params.final_value_msat), &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ); log_info!( @@ -992,9 +975,9 @@ impl OutboundPayments { ); if let Err(e) = result { self.handle_pay_route_err( - e, payment_id, payment_hash, route, route_params, router, first_hops, - &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, - pending_events, &send_payment_along_path + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path ); } Ok(()) @@ -1269,12 +1252,16 @@ impl OutboundPayments { })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, None, onion_session_privs, node_signer, + keysend_preimage, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path + ); } Ok(()) } @@ -1426,19 +1413,25 @@ impl OutboundPayments { } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), payment_id, Some(total_msat), onion_session_privs, node_signer, + invoice_request.as_ref(), payment_id, Some(total_msat), &onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + send_payment_along_path + ); } } fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, - mut route_params: RouteParameters, router: &R, first_hops: Vec, - inflight_htlcs: &IH, entropy_source: &ES, 
node_signer: &NS, best_block_height: u32, logger: &L, - pending_events: &Mutex)>>, send_payment_along_path: &SP, + mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, + first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, + best_block_height: u32, logger: &L, + pending_events: &Mutex)>>, + send_payment_along_path: &SP, ) where R::Target: Router, @@ -1450,10 +1443,24 @@ impl OutboundPayments { { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events); self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => { + debug_assert_eq!(results.len(), route.paths.len()); + debug_assert_eq!(results.len(), onion_session_privs.len()); + let failed_paths = results.iter().zip(route.paths.iter().zip(onion_session_privs.iter())) + .filter_map(|(path_res, (path, session_priv))| { + match path_res { + // While a MonitorUpdateInProgress is an Err(_), the payment is still + // considered "in flight" and we shouldn't remove it from the + // PendingOutboundPayment set. + Ok(_) | Err(APIError::MonitorUpdateInProgress) => None, + _ => Some((path, session_priv)) + } + }); + self.remove_session_privs(payment_id, failed_paths); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so @@ -1467,11 +1474,13 @@ impl OutboundPayments { }, PaymentSendFailure::PathParameterError(results) => { log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), logger, pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable @@ -1511,6 +1520,21 @@ impl OutboundPayments { } } + // If a payment fails after adding the pending payment but before any HTLCs are locked into + // channels, we need to clear the session_privs in order for abandoning the payment to succeed. 
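+ // The iterator must yield the (path, session_priv) pairs previously registered for this + // payment; the debug_asserts in the body only fire if a caller passes a path, or names a + // payment, that was never added.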
+ fn remove_session_privs<'a, I: Iterator>( + &self, payment_id: PaymentId, path_session_priv: I + ) { + if let Some(payment) = self.pending_outbound_payments.lock().unwrap().get_mut(&payment_id) { + for (path, session_priv_bytes) in path_session_priv { + let removed = payment.remove(session_priv_bytes, Some(path)); + debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); + } + } else { + debug_assert!(false, "This can't happen as the payment was added by callers"); + } + } + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, best_block_height: u32, send_payment_along_path: F @@ -1542,7 +1566,7 @@ impl OutboundPayments { let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, payment_id, None, onion_session_privs, node_signer, best_block_height, + None, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), @@ -1733,7 +1757,7 @@ impl OutboundPayments { fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, - payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>, + payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where @@ -1784,30 +1808,12 @@ impl OutboundPayments { let cur_height = best_block_height + 1; let mut results = Vec::new(); debug_assert_eq!(route.paths.len(), onion_session_privs.len()); - for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { - let mut path_res = send_payment_along_path(SendAlongPathArgs { + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { + let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - session_priv_bytes + session_priv_bytes: *session_priv_bytes }); - match path_res { - Ok(_) => {}, - Err(APIError::MonitorUpdateInProgress) => { - // While a MonitorUpdateInProgress is an Err(_), the payment is still - // considered "in flight" and we shouldn't remove it from the - // PendingOutboundPayment set. - }, - Err(_) => { - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - if let Some(payment) = pending_outbounds.get_mut(&payment_id) { - let removed = payment.remove(&session_priv_bytes, Some(path)); - debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); - } else { - debug_assert!(false, "This can't happen as the payment was added by callers"); - path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. 
Please report this bug!".to_owned() }); - } - } - } results.push(path_res); } let mut has_ok = false; @@ -1872,7 +1878,7 @@ impl OutboundPayments { F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, recv_value_msat, onion_session_privs, + keysend_preimage, None, payment_id, recv_value_msat, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } @@ -1880,9 +1886,15 @@ impl OutboundPayments { // If we failed to send any paths, remove the new PaymentId from the `pending_outbound_payments` // map as the payment is free to be resent. fn remove_outbound_if_all_failed(&self, payment_id: PaymentId, err: &PaymentSendFailure) { - if let &PaymentSendFailure::AllFailedResendSafe(_) = err { - let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); - debug_assert!(removed, "We should always have a pending payment to remove here"); + match err { + PaymentSendFailure::AllFailedResendSafe(_) + | PaymentSendFailure::ParameterError(_) + | PaymentSendFailure::PathParameterError(_) => + { + let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); + debug_assert!(removed, "We should always have a pending payment to remove here"); + }, + PaymentSendFailure::DuplicatePayment | PaymentSendFailure::PartialFailure { .. } => {} } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0c9c5d0e920..0963ed0aa4f 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -16,7 +16,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATE use crate::sign::EntropySource; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; -use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; +use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -24,7 +24,7 @@ use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::onion_utils; -use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; use crate::routing::scoring::ChannelUsage; @@ -599,7 +599,7 @@ fn no_pending_leak_on_initial_send_failure() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - 
unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Peer for first hop currently disconnected")); @@ -946,7 +946,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // confirming, we will fail as it's considered still-pending... let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -985,7 +985,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1004,7 +1004,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); match nodes[0].node.send_payment_with_route(new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1249,6 +1249,7 @@ fn sent_probe_is_probe_of_sending_node() { // First check we refuse to build a single-hop probe let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err()); + assert!(nodes[0].node.list_recent_payments().is_empty()); // Then build an actual two-hop probing path let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000); @@ -1547,7 +1548,7 @@ fn claimed_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } @@ -1626,7 +1627,7 @@ fn abandoned_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } @@ -4375,3 +4376,106 @@ fn test_non_strict_forwarding() { let events = nodes[0].node.get_and_clear_pending_events(); expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); } + +#[test] +fn 
remove_pending_outbounds_on_buggy_router() { + // Ensure that if a payment errors due to a bogus route, we'll abandon the payment and remove the + // pending outbound from storage. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_id = PaymentId([42; 32]); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + let route_params = route.route_params.clone().unwrap(); + nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); + + nodes[0].node.send_payment( + payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, route_params, + Retry::Attempts(1) // Even though another attempt is allowed, the payment should fail + ).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match &events[0] { + Event::PaymentPathFailed { failure, payment_failed_permanently, .. } => { + assert_eq!(failure, &PathFailure::InitialSend { + err: APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + }); + assert!(!payment_failed_permanently); + }, + _ => panic!() + } + match events[1] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), PaymentFailureReason::UnexpectedError); + }, + _ => panic!() + } + assert!(nodes[0].node.list_recent_payments().is_empty()); +} + +#[test] +fn remove_pending_outbound_probe_on_buggy_path() { + // Ensure that if a probe errors due to a bogus route, we'll return an error and remove the + // pending outbound from storage. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + + assert_eq!( + nodes[0].node.send_probe(route.paths.pop().unwrap()).unwrap_err(), + ProbeSendFailure::ParameterError( + APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + ) + ); + assert!(nodes[0].node.list_recent_payments().is_empty()); +} + +#[test] +fn pay_route_without_params() { + // Make sure we can use ChannelManager::send_payment_with_route to pay a route where + // Route::route_parameters is None. 
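+ // (Hand-constructed routes may legitimately lack route_params, so the send path must not + // assume retry parameters are present.)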
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + route.route_params.take(); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_hash.0) + ).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) + ); +} diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9fd428329af..960dc441ae5 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -15,7 +15,7 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -364,10 +364,10 @@ fn updates_shutdown_wait() { let route_params = RouteParameters::from_payment_params_and_value(payment_params_2, 100_000); let route_2 = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None, &logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route_1, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route_1, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route_2, payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route_2, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 78a93aa0d39..04f55837267 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -25,6 +25,7 @@ use crate::offers::invoice::Bolt12Invoice; use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId}; use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp}; use crate::sign::EntropySource; +use crate::sync::Mutex; use crate::util::ser::{Writeable, Readable, 
ReadableArgs, Writer}; use crate::util::logger::{Level, Logger}; use crate::crypto::chacha20::ChaCha20; @@ -185,6 +186,45 @@ impl>, L: Deref, ES: Deref, S: Deref, SP: Size } } +/// A `Router` that returns a fixed route one time, erroring otherwise. Useful for +/// `ChannelManager::send_payment_with_route` to support sending to specific routes without +/// requiring a custom `Router` implementation. +pub(crate) struct FixedRouter { + // Use an `Option` to avoid needing to clone the route when `find_route` is called. + route: Mutex>, +} + +impl FixedRouter { + pub(crate) fn new(route: Route) -> Self { + Self { route: Mutex::new(Some(route)) } + } +} + +impl Router for FixedRouter { + fn find_route( + &self, _payer: &PublicKey, _route_params: &RouteParameters, + _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs + ) -> Result { + self.route.lock().unwrap().take().ok_or_else(|| { + LightningError { + err: "Can't use this router to return multiple routes".to_owned(), + action: ErrorAction::IgnoreError, + } + }) + } + + fn create_blinded_payment_paths< + T: secp256k1::Signing + secp256k1::Verification + > ( + &self, _recipient: PublicKey, _first_hops: Vec, _tlvs: ReceiveTlvs, + _amount_msats: u64, _secp_ctx: &Secp256k1 + ) -> Result, ()> { + // Should be unreachable as this router is only intended to provide a one-time payment route. + debug_assert!(false); + Err(()) + } +} + /// A trait defining behavior for routing a payment. pub trait Router { /// Finds a [`Route`] for a payment between the given `payer` and a payee. diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 2be0cb39f4f..7aa41531ce2 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -538,6 +538,15 @@ impl SpendableOutputDescriptor { }; Ok((psbt, expected_max_weight)) } + + /// Returns the outpoint of the spendable output. + pub fn outpoint(&self) -> OutPoint { + match self { + Self::StaticOutput { outpoint, .. } => *outpoint, + Self::StaticPaymentOutput(descriptor) => descriptor.outpoint, + Self::DelayedPaymentOutput(descriptor) => descriptor.outpoint, + } + } } /// The parameters required to derive a channel signer via [`SignerProvider`]. diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index b61306194df..0022e5286d2 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -9,7 +9,7 @@ //! sweeping them. use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use crate::chain::channelmonitor::ANTI_REORG_DELAY; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS}; use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput}; use crate::io; use crate::ln::msgs::DecodeError; @@ -32,6 +32,9 @@ use bitcoin::{BlockHash, Transaction, Txid}; use core::ops::Deref; +/// The number of blocks we wait before we prune the tracked spendable outputs. +pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; + /// The state of a spendable output currently tracked by an [`OutputSweeper`]. #[derive(Clone, Debug, PartialEq, Eq)] pub struct TrackedSpendableOutput { @@ -71,13 +74,7 @@ impl TrackedSpendableOutput { /// Returns whether the output is spent in the given transaction. pub fn is_spent_in(&self, tx: &Transaction) -> bool { - let prev_outpoint = match &self.descriptor { - SpendableOutputDescriptor::StaticOutput { outpoint, .. 
} => *outpoint, - SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.outpoint, - SpendableOutputDescriptor::StaticPaymentOutput(output) => output.outpoint, - } - .into_bitcoin_outpoint(); - + let prev_outpoint = self.descriptor.outpoint().into_bitcoin_outpoint(); tx.input.iter().any(|input| input.previous_output == prev_outpoint) } } @@ -107,7 +104,11 @@ pub enum OutputSpendStatus { latest_spending_tx: Transaction, }, /// A transaction spending the output has been confirmed on-chain but will be tracked until it - /// reaches [`ANTI_REORG_DELAY`] confirmations. + /// reaches at least [`PRUNE_DELAY_BLOCKS`] confirmations to ensure [`Event::SpendableOutputs`] + /// stemming from lingering [`ChannelMonitor`]s can safely be replayed. + /// + /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs + /// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor PendingThresholdConfirmations { /// The hash of the chain tip when we first broadcast a transaction spending this output. first_broadcast_hash: BlockHash, @@ -530,7 +531,9 @@ where // Prune all outputs that have sufficient depth by now. sweeper_state.outputs.retain(|o| { if let Some(confirmation_height) = o.status.confirmation_height() { - if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 { + // We wait at least `PRUNE_DELAY_BLOCKS` as before that + // `Event::SpendableOutputs` from lingering monitors might get replayed. + if cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1 { log_debug!(self.logger, "Pruning swept output as sufficiently confirmed via spend in transaction {:?}. Pruned descriptor: {:?}", o.status.latest_spending_tx().map(|t| t.compute_txid()), o.descriptor diff --git a/pending_changelog/3531-buggy-router-leak.txt b/pending_changelog/3531-buggy-router-leak.txt new file mode 100644 index 00000000000..72714aa8a8b --- /dev/null +++ b/pending_changelog/3531-buggy-router-leak.txt @@ -0,0 +1,4 @@ +## Bug Fixes + +* Fixed a rare case where a custom router returning a buggy route could result in holding onto a + pending payment forever and in some cases failing to generate a PaymentFailed event (#3531).
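
A note on the reworked unwrap_send_err! macro at the top of this patch: send_payment_with_route now returns Ok(()) for failures that previously surfaced as Err(PaymentSendFailure::...), and the macro instead asserts on the event queue, expecting exactly a PaymentPathFailed event carrying PathFailure::InitialSend followed by a PaymentFailed event. Below is a minimal standalone sketch of that two-event contract; the enums are stand-ins, not the real lightning::events types.

// Stand-ins for the real event types; only the shape of the assertion is modeled.
#[derive(Debug)]
enum PathFailure {
	InitialSend { err: String },
}

#[derive(Debug)]
enum Event {
	PaymentPathFailed { failure: PathFailure },
	PaymentFailed,
}

// Mirrors the macro's assertions: exactly two events, the path-level failure with the
// initial-send error first, then the payment-level failure.
fn assert_initial_send_failure(events: &[Event]) -> &str {
	assert_eq!(events.len(), 2);
	let err = match &events[0] {
		Event::PaymentPathFailed { failure: PathFailure::InitialSend { err } } => err,
		_ => panic!("expected PaymentPathFailed"),
	};
	match &events[1] {
		Event::PaymentFailed => {},
		_ => panic!("expected PaymentFailed"),
	}
	err
}

fn main() {
	let events = vec![
		Event::PaymentPathFailed {
			failure: PathFailure::InitialSend { err: "channel unavailable".to_string() },
		},
		Event::PaymentFailed,
	];
	assert_eq!(assert_initial_send_failure(&events), "channel unavailable");
}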
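The FixedRouter added to router.rs is what lets ChannelManager::send_payment_with_route delegate to the retry-capable send path while still honoring a caller-supplied route: it yields the stored route exactly once and errors on any later lookup, so no unintended retry can fetch a second route. A self-contained sketch of the take-once pattern, using simplified stand-in types rather than the real Router trait signature:

use std::sync::Mutex;

// Simplified stand-ins for Route and the router error type.
struct Route(&'static str);
#[derive(Debug)]
struct RouterError(&'static str);

struct FixedRouter {
	// Option so the route can be handed out by value, without cloning.
	route: Mutex<Option<Route>>,
}

impl FixedRouter {
	fn new(route: Route) -> Self {
		Self { route: Mutex::new(Some(route)) }
	}

	// As in the patch: take() consumes the stored route on first use, so any second
	// find_route call (e.g. from an unexpected retry) fails instead of re-sending.
	fn find_route(&self) -> Result<Route, RouterError> {
		self.route.lock().unwrap().take()
			.ok_or(RouterError("Can't use this router to return multiple routes"))
	}
}

fn main() {
	let router = FixedRouter::new(Route("hop_a -> hop_b"));
	assert!(router.find_route().is_ok()); // first lookup hands out the fixed route
	assert!(router.find_route().is_err()); // all later lookups error
}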
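On the sweeper side, outputs are now retained for PRUNE_DELAY_BLOCKS = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY confirmations so that Event::SpendableOutputs replays from a lingering ChannelMonitor still find their tracked output. A worked sketch of the retain predicate follows; ARCHIVAL_DELAY_BLOCKS is 4032 per the archive test above, while ANTI_REORG_DELAY = 6 is an assumption (LDK's usual value).

// The pruning rule from OutputSweeper, extracted for illustration.
const ANTI_REORG_DELAY: u32 = 6; // assumed value, see lead-in above
const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY;

// An output whose spend confirmed at `confirmation_height` is pruned once the tip
// reaches `confirmation_height + PRUNE_DELAY_BLOCKS - 1`: the confirmation block itself
// counts as the first confirmation, so this is PRUNE_DELAY_BLOCKS confirmations total.
fn should_prune(cur_height: u32, confirmation_height: u32) -> bool {
	cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1
}

fn main() {
	let conf_height = 100_000;
	assert!(!should_prune(conf_height + PRUNE_DELAY_BLOCKS - 2, conf_height)); // 4037 confs: keep
	assert!(should_prune(conf_height + PRUNE_DELAY_BLOCKS - 1, conf_height)); // 4038 confs: prune
}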