diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs
index 2b0d8c4c72b..a28247f228a 100644
--- a/fuzz/src/process_onion_failure.rs
+++ b/fuzz/src/process_onion_failure.rs
@@ -115,7 +115,7 @@ fn do_test(data: &[u8], out: Out) {
 	let path = Path { hops, blinded_tail };
 
 	let htlc_source = HTLCSource::OutboundRoute {
-		path,
+		path: path.clone(),
 		session_priv,
 		first_hop_htlc_msat: 0,
 		payment_id,
@@ -133,8 +133,19 @@ fn do_test(data: &[u8], out: Out) {
 	} else {
 		None
 	};
-	let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data };
+	let encrypted_packet =
+		OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() };
 	lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet);
+
+	if let Some(attribution_data) = attribution_data {
+		lightning::ln::process_onion_success(
+			&secp_ctx,
+			&logger,
+			&path,
+			&session_priv,
+			attribution_data,
+		);
+	}
 }
 
 /// Method that needs to be added manually, {name}_test
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index ef8f256ed5e..8e931759aa0 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -2887,8 +2887,12 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id));
 	}
 
-	let fulfill_msg =
-		msgs::UpdateFulfillHTLC { channel_id: chan_id_2, htlc_id: 0, payment_preimage };
+	let fulfill_msg = msgs::UpdateFulfillHTLC {
+		channel_id: chan_id_2,
+		htlc_id: 0,
+		payment_preimage,
+		attribution_data: None,
+	};
 	if second_fails {
 		nodes[2].node.fail_htlc_backwards(&payment_hash);
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
@@ -2904,8 +2908,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 
 		let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 		assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
-		// Check that the message we're about to deliver matches the one generated:
-		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+
+		// Check that the message we're about to deliver matches the one generated. Ignore attribution data.
+		assert_eq!(fulfill_msg.channel_id, cs_updates.update_fulfill_htlcs[0].channel_id);
+		assert_eq!(fulfill_msg.htlc_id, cs_updates.update_fulfill_htlcs[0].htlc_id);
+		assert_eq!(
+			fulfill_msg.payment_preimage,
+			cs_updates.update_fulfill_htlcs[0].payment_preimage
+		);
 	}
 	nodes[1].node.handle_update_fulfill_htlc(node_c_id, &fulfill_msg);
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index b516961d100..a0d31717c65 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -138,7 +138,7 @@ enum FeeUpdateState {
 enum InboundHTLCRemovalReason {
 	FailRelay(msgs::OnionErrorPacket),
 	FailMalformed(([u8; 32], u16)),
-	Fulfill(PaymentPreimage),
+	Fulfill(PaymentPreimage, Option<AttributionData>),
 }
 
 /// Represents the resolution status of an inbound HTLC.
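
// Illustrative sketch (not part of the patch): the channel.rs hunk above widens
// `InboundHTLCRemovalReason::Fulfill` from a bare `PaymentPreimage` to a
// `(PaymentPreimage, Option<AttributionData>)` pair. The minimal sketch below shows how
// match sites adapt. All types here are local stand-ins rather than the real LDK types
// (`msgs::OnionErrorPacket`, `onion_utils::AttributionData`, ...), so the snippet is
// self-contained and only mirrors the shape of the change.

struct PaymentPreimage([u8; 32]);
struct AttributionData; // stand-in for onion_utils::AttributionData

enum InboundHTLCRemovalReason {
	FailRelay(Vec<u8>), // stand-in for msgs::OnionErrorPacket
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage, Option<AttributionData>),
}

// Call sites that only need the preimage simply ignore the new field...
fn preimage(reason: &InboundHTLCRemovalReason) -> Option<&PaymentPreimage> {
	match reason {
		InboundHTLCRemovalReason::Fulfill(preimage, _) => Some(preimage),
		_ => None,
	}
}

// ...while the send path can now pull out the attribution data as well.
fn fulfill_parts(
	reason: &InboundHTLCRemovalReason,
) -> Option<(&PaymentPreimage, Option<&AttributionData>)> {
	match reason {
		InboundHTLCRemovalReason::Fulfill(preimage, attribution) => {
			Some((preimage, attribution.as_ref()))
		},
		_ => None,
	}
}
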
@@ -234,7 +234,7 @@ impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
 				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
 			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
 				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
-			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
+			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_, _)) =>
 				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
 		}
 	}
@@ -266,7 +266,7 @@ impl InboundHTLCState {
 
 	fn preimage(&self) -> Option<PaymentPreimage> {
 		match self {
-			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage)) => {
+			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage, _)) => {
 				Some(*preimage)
 			},
 			_ => None,
@@ -466,6 +466,7 @@ enum HTLCUpdateAwaitingACK {
 	},
 	ClaimHTLC {
 		payment_preimage: PaymentPreimage,
+		attribution_data: Option<AttributionData>,
 		htlc_id: u64,
 	},
 	FailHTLC {
@@ -6204,26 +6205,31 @@ where
 	///
 	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
 	/// disconnected).
-	#[rustfmt::skip]
-	pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy<L: Deref>
-		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
-	where L::Target: Logger {
+	pub fn claim_htlc_while_disconnected_dropping_mon_update_legacy<L: Deref>(
+		&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L,
+	) where
+		L::Target: Logger,
+	{
 		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
 		// (see equivalent if condition there).
 		assert!(!self.context.channel_state.can_generate_new_commitment());
 		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
-		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger);
+		let fulfill_resp =
+			self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, None, logger);
 		self.context.latest_monitor_update_id = mon_update_id;
 		if let UpdateFulfillFetch::NewClaim { update_blocked, .. } = fulfill_resp {
 			assert!(update_blocked); // The HTLC must have ended up in the holding cell.
 		}
 	}
 
-	#[rustfmt::skip]
 	fn get_update_fulfill_htlc<L: Deref>(
 		&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage,
-		payment_info: Option<PaymentClaimDetails>, logger: &L,
-	) -> UpdateFulfillFetch where L::Target: Logger {
+		payment_info: Option<PaymentClaimDetails>, attribution_data: Option<AttributionData>,
+		logger: &L,
+	) -> UpdateFulfillFetch
+	where
+		L::Target: Logger,
+	{
 		// Either ChannelReady got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted in an
 		// incoming HTLC anyway).
If we got to ShutdownComplete, callers aren't allowed to call us, @@ -6240,23 +6246,34 @@ where let mut htlc_value_msat = 0; for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { - debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array())); - log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}", - htlc.htlc_id, htlc.payment_hash, payment_preimage_arg); + debug_assert_eq!( + htlc.payment_hash, + PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()) + ); + log_debug!( + logger, + "Claiming inbound HTLC id {} with payment hash {} with preimage {}", + htlc.htlc_id, + htlc.payment_hash, + payment_preimage_arg + ); match htlc.state { InboundHTLCState::Committed => {}, InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { } else { log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); + debug_assert!( + false, + "Tried to fulfill an HTLC that was already failed" + ); } return UpdateFulfillFetch::DuplicateClaim {}; }, _ => { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); // Don't return in release mode here so that we can update channel_monitor - } + }, } pending_idx = idx; htlc_value_msat = htlc.amount_msat; @@ -6295,53 +6312,94 @@ where return UpdateFulfillFetch::DuplicateClaim {}; } }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | - &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => - { + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } + | &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id()); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. - debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + debug_assert!( + false, + "Tried to fulfill an HTLC that was already failed" + ); + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } }, - _ => {} + _ => {}, } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32()); + log_trace!( + logger, + "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", + &self.context.channel_id(), + self.context.channel_state.to_u32() + ); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { - payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, + payment_preimage: payment_preimage_arg, + htlc_id: htlc_id_arg, + attribution_data, }); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; if let InboundHTLCState::Committed = htlc.state { } else { - debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: true }; + debug_assert!( + false, + "Have an inbound HTLC we tried to claim before it was fully committed to" + ); + return UpdateFulfillFetch::NewClaim { + monitor_update, + htlc_value_msat, + update_blocked: true, + }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); + log_trace!( + logger, + "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", + &htlc.payment_hash, + &self.context.channel_id + ); + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill( + payment_preimage_arg.clone(), + attribution_data, + )); } - UpdateFulfillFetch::NewClaim { - monitor_update, - htlc_value_msat, - update_blocked: false, - } + UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, update_blocked: false } } - #[rustfmt::skip] pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, - payment_info: Option, logger: &L, - ) -> UpdateFulfillCommitFetch where L::Target: Logger { + payment_info: Option, attribution_data: Option, + logger: &L, + ) -> UpdateFulfillCommitFetch + where + L::Target: Logger, + { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); - match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) { - UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, update_blocked } => { + match self.get_update_fulfill_htlc( + htlc_id, + payment_preimage, + payment_info, + attribution_data, + logger, + ) { + UpdateFulfillFetch::NewClaim { + mut monitor_update, + htlc_value_msat, + update_blocked, + } => { // Even if we aren't supposed to let new monitor updates with commitment state // updates run, we still need to push the preimage ChannelMonitorUpdateStep no // matter what. 
Sadly, to push a new monitor update which flies before others @@ -6354,8 +6412,12 @@ where self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); } else { - let new_mon_id = self.context.blocked_monitor_updates.get(0) - .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); + let new_mon_id = self + .context + .blocked_monitor_updates + .get(0) + .map(|upd| upd.update.update_id) + .unwrap_or(monitor_update.update_id); monitor_update.update_id = new_mon_id; for held_update in self.context.blocked_monitor_updates.iter_mut() { held_update.update.update_id += 1; @@ -6363,14 +6425,21 @@ where if !update_blocked { debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); let update = self.build_commitment_no_status_check(logger); - self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate { - update, - }); + self.context + .blocked_monitor_updates + .push(PendingChannelMonitorUpdate { update }); } } - self.monitor_updating_paused(false, !update_blocked, false, Vec::new(), Vec::new(), Vec::new()); - UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, } + self.monitor_updating_paused( + false, + !update_blocked, + false, + Vec::new(), + Vec::new(), + Vec::new(), + ); + UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, } @@ -6657,19 +6726,34 @@ where Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) } - #[rustfmt::skip] - pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option), ChannelError> { - if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { - return Err(ChannelError::WarnAndDisconnect("Got fulfill HTLC message while quiescent".to_owned())); + pub fn update_fulfill_htlc( + &mut self, msg: &msgs::UpdateFulfillHTLC, + ) -> Result<(HTLCSource, u64, Option, Option), ChannelError> { + if self.context.channel_state.is_remote_stfu_sent() + || self.context.channel_state.is_quiescent() + { + return Err(ChannelError::WarnAndDisconnect( + "Got fulfill HTLC message while quiescent".to_owned(), + )); } if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) { - return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned())); + return Err(ChannelError::close( + "Got fulfill HTLC message when channel was not in an operational state".to_owned(), + )); } if self.context.channel_state.is_peer_disconnected() { - return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); + return Err(ChannelError::close( + "Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned(), + )); } - self.mark_outbound_htlc_removed(msg.htlc_id, OutboundHTLCOutcome::Success(msg.payment_preimage)).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat)) + self.mark_outbound_htlc_removed( + msg.htlc_id, + OutboundHTLCOutcome::Success(msg.payment_preimage), + ) + .map(|htlc| { + (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat, htlc.send_timestamp) + }) } #[rustfmt::skip] @@ -6959,11 +7043,17 @@ where Ok(()) } - #[rustfmt::skip] - fn commitment_signed_update_monitor(&mut self, mut updates: Vec, logger: &L) -> Result, ChannelError> - where L::Target: Logger + fn 
commitment_signed_update_monitor( + &mut self, mut updates: Vec, logger: &L, + ) -> Result, ChannelError> + where + L::Target: Logger, { - if self.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() { + if self + .holder_commitment_point + .advance(&self.context.holder_signer, &self.context.secp_ctx, logger) + .is_err() + { // We only fail to advance our commitment point/number if we're currently // waiting for our signer to unblock and provide a commitment point. // During post-funding channel operation, we only advance our point upon @@ -6990,7 +7080,8 @@ where if let &InboundHTLCState::RemoteAnnounced(ref htlc_resolution) = &htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); + htlc.state = + InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution.clone()); need_commitment = true; } } @@ -7018,8 +7109,10 @@ where for mut update in updates.iter_mut() { if let ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { - claimed_htlcs: ref mut update_claimed_htlcs, .. - } = &mut update { + claimed_htlcs: ref mut update_claimed_htlcs, + .. + } = &mut update + { debug_assert!(update_claimed_htlcs.is_empty()); *update_claimed_htlcs = claimed_htlcs.clone(); } else { @@ -7059,21 +7152,31 @@ where return Ok(self.push_ret_blockable_mon_update(monitor_update)); } - let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() { - // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - - // we'll send one right away when we get the revoke_and_ack when we - // free_holding_cell_htlcs(). - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.context.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - true - } else { false }; + let need_commitment_signed = + if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() { + // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok - + // we'll send one right away when we get the revoke_and_ack when we + // free_holding_cell_htlcs(). + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them to be + // strictly increasing by one, so decrement it here. 
+ self.context.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + true + } else { + false + }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" }); - self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + true, + need_commitment_signed, + false, + Vec::new(), + Vec::new(), + Vec::new(), + ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -7098,18 +7201,30 @@ where /// Frees any pending commitment updates in the holding cell, generating the relevant messages /// for our counterparty. - #[rustfmt::skip] fn free_holding_cell_htlcs( - &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L + &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> (Option, Vec<(HTLCSource, PaymentHash)>) - where F::Target: FeeEstimator, L::Target: Logger + where + F::Target: FeeEstimator, + L::Target: Logger, { assert!(matches!(self.context.channel_state, ChannelState::ChannelReady(_))); assert!(!self.context.channel_state.is_monitor_update_in_progress()); assert!(!self.context.channel_state.is_quiescent()); - if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { - log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id()); + if self.context.holding_cell_htlc_updates.len() != 0 + || self.context.holding_cell_update_fee.is_some() + { + log_trace!( + logger, + "Freeing holding cell with {} HTLC updates{} in channel {}", + self.context.holding_cell_htlc_updates.len(), + if self.context.holding_cell_update_fee.is_some() { + " and a fee update" + } else { + "" + }, + &self.context.channel_id() + ); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! @@ -7131,12 +7246,26 @@ where // to rebalance channels. let fail_htlc_res = match &htlc_update { &HTLCUpdateAwaitingACK::AddHTLC { - amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, - skimmed_fee_msat, blinding_point, .. + amount_msat, + cltv_expiry, + ref payment_hash, + ref source, + ref onion_routing_packet, + skimmed_fee_msat, + blinding_point, + .. } => { match self.send_htlc( - amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), - false, skimmed_fee_msat, blinding_point, fee_estimator, logger + amount_msat, + *payment_hash, + cltv_expiry, + source.clone(), + onion_routing_packet.clone(), + false, + skimmed_fee_msat, + blinding_point, + fee_estimator, + logger, ) { Ok(update_add_msg_opt) => { // `send_htlc` only returns `Ok(None)`, when an update goes into @@ -7156,11 +7285,15 @@ where // successfully forwarded/failed/fulfilled, causing our // counterparty to eventually close on us. htlcs_to_fail.push((source.clone(), *payment_hash)); - } + }, } None }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. 
} => { + &HTLCUpdateAwaitingACK::ClaimHTLC { + ref payment_preimage, + htlc_id, + ref attribution_data, + } => { // If an HTLC claim was previously added to the holding cell (via // `get_update_fulfill_htlc`, then generating the claim message itself must // not fail - any in between attempts to claim the HTLC will have resulted @@ -7172,21 +7305,34 @@ where // there's no harm in including the extra `ChannelMonitorUpdateStep` here. // We do not bother to track and include `payment_info` here, however. let mut additional_monitor_update = - if let UpdateFulfillFetch::NewClaim { monitor_update, .. } = - self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger) - { monitor_update } else { unreachable!() }; + if let UpdateFulfillFetch::NewClaim { monitor_update, .. } = self + .get_update_fulfill_htlc( + htlc_id, + *payment_preimage, + None, + attribution_data.clone(), + logger, + ) { + monitor_update + } else { + unreachable!() + }; update_fulfill_count += 1; monitor_update.updates.append(&mut additional_monitor_update.updates); None }, - &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => { - Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger) - .map(|fail_msg_opt| fail_msg_opt.map(|_| ()))) - }, - &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { - Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger) - .map(|fail_msg_opt| fail_msg_opt.map(|_| ()))) - } + &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => Some( + self.fail_htlc(htlc_id, err_packet.clone(), false, logger) + .map(|fail_msg_opt| fail_msg_opt.map(|_| ())), + ), + &HTLCUpdateAwaitingACK::FailMalformedHTLC { + htlc_id, + failure_code, + sha256_of_onion, + } => Some( + self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger) + .map(|fail_msg_opt| fail_msg_opt.map(|_| ())), + ), }; if let Some(res) = fail_htlc_res { match res { @@ -7206,9 +7352,16 @@ where } } } - let update_fee = self.context.holding_cell_update_fee.take().and_then(|feerate| self.send_update_fee(feerate, false, fee_estimator, logger)); + let update_fee = + self.context.holding_cell_update_fee.take().and_then(|feerate| { + self.send_update_fee(feerate, false, fee_estimator, logger) + }); - if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && update_fee.is_none() { + if update_add_count == 0 + && update_fulfill_count == 0 + && update_fail_count == 0 + && update_fee.is_none() + { return (None, htlcs_to_fail); } @@ -7376,7 +7529,7 @@ where pending_inbound_htlcs.retain(|htlc| { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash); - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } *expecting_peer_commitment_signed = true; @@ -8201,8 +8354,12 @@ where } /// Gets the last commitment update for immediate sending to our peer. 
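
// Illustrative sketch (not part of the patch): `get_last_commitment_update_for_send`
// in the next hunk copies the stored attribution data into the outgoing
// `update_fulfill_htlc`. Below is a minimal sketch of the resulting message shape,
// using hypothetical stand-in types; only the field names mirror the diff, and the new
// field is optional so claims without attribution data (for example holding-cell
// claims deserialized from an older version as `None`) still build a valid message.

struct ChannelId([u8; 32]);
struct PaymentPreimage([u8; 32]);
struct AttributionData; // stand-in

struct UpdateFulfillHTLC {
	channel_id: ChannelId,
	htlc_id: u64,
	payment_preimage: PaymentPreimage,
	// New in this patch: optional attribution data alongside the preimage.
	attribution_data: Option<AttributionData>,
}

fn build_fulfill(
	channel_id: ChannelId, htlc_id: u64, payment_preimage: PaymentPreimage,
	attribution_data: Option<AttributionData>,
) -> UpdateFulfillHTLC {
	UpdateFulfillHTLC { channel_id, htlc_id, payment_preimage, attribution_data }
}
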
- #[rustfmt::skip] - fn get_last_commitment_update_for_send(&mut self, logger: &L) -> Result where L::Target: Logger { + fn get_last_commitment_update_for_send( + &mut self, logger: &L, + ) -> Result + where + L::Target: Logger, + { let mut update_add_htlcs = Vec::new(); let mut update_fulfill_htlcs = Vec::new(); let mut update_fail_htlcs = Vec::new(); @@ -8234,7 +8391,10 @@ where attribution_data: err_packet.attribution_data.clone(), }); }, - &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => { + &InboundHTLCRemovalReason::FailMalformed(( + ref sha256_of_onion, + ref failure_code, + )) => { update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC { channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, @@ -8242,42 +8402,57 @@ where failure_code: failure_code.clone(), }); }, - &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { + &InboundHTLCRemovalReason::Fulfill( + ref payment_preimage, + ref attribution_data, + ) => { update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, payment_preimage: payment_preimage.clone(), + attribution_data: attribution_data.clone(), }); }, } } } - let update_fee = if self.funding.is_outbound() && self.context.pending_update_fee.is_some() { + let update_fee = if self.funding.is_outbound() && self.context.pending_update_fee.is_some() + { Some(msgs::UpdateFee { channel_id: self.context.channel_id(), feerate_per_kw: self.context.pending_update_fee.unwrap().0, }) - } else { None }; + } else { + None + }; log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); - let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger) { - if self.context.signer_pending_commitment_update { - log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update"); - self.context.signer_pending_commitment_update = false; - } - update - } else { - if !self.context.signer_pending_commitment_update { - log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); - self.context.signer_pending_commitment_update = true; - } - return Err(()); - }; + let commitment_signed = + if let Ok(update) = self.send_commitment_no_state_update(logger) { + if self.context.signer_pending_commitment_update { + log_trace!( + logger, + "Commitment update generated: clearing signer_pending_commitment_update" + ); + self.context.signer_pending_commitment_update = false; + } + update + } else { + if !self.context.signer_pending_commitment_update { + log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update"); + self.context.signer_pending_commitment_update = true; + } + return Err(()); + }; Ok(msgs::CommitmentUpdate { - update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, + update_add_htlcs, + update_fulfill_htlcs, + update_fail_htlcs, + update_fail_malformed_htlcs, + update_fee, commitment_signed, }) } @@ -12247,7 +12422,6 @@ impl Writeable for FundedChannel where SP::Target: SignerProvider, { - #[rustfmt::skip] fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if 
remove_uncommitted_htlcs_and_mark_paused had just been // called. @@ -12287,7 +12461,12 @@ where // Write out the old serialization for shutdown_pubkey for backwards compatibility, if // deserialized from that format. - match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) { + match self + .context + .shutdown_scriptpubkey + .as_ref() + .and_then(|script| script.as_legacy_pubkey()) + { Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?, None => [0u8; PUBLIC_KEY_SIZE].write(writer)?, } @@ -12303,7 +12482,7 @@ where dropped_inbound_htlcs += 1; } } - let mut removed_htlc_failure_attribution_data: Vec<&Option> = Vec::new(); + let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -12329,16 +12508,20 @@ where &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; match removal_reason { - InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { data, attribution_data }) => { + InboundHTLCRemovalReason::FailRelay(msgs::OnionErrorPacket { + data, + attribution_data, + }) => { 0u8.write(writer)?; data.write(writer)?; - removed_htlc_failure_attribution_data.push(&attribution_data); + removed_htlc_attribution_data.push(&attribution_data); }, InboundHTLCRemovalReason::FailMalformed((hash, code)) => { 1u8.write(writer)?; (hash, code).write(writer)?; }, - InboundHTLCRemovalReason::Fulfill(preimage) => { + InboundHTLCRemovalReason::Fulfill(preimage, _) => { + // TODO: Persistence 2u8.write(writer)?; preimage.write(writer)?; }, @@ -12380,7 +12563,7 @@ where } let reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; - } + }, &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => { 4u8.write(writer)?; if let OutboundHTLCOutcome::Success(preimage) = outcome { @@ -12388,24 +12571,32 @@ where } let reason: Option<&HTLCFailReason> = outcome.into(); reason.write(writer)?; - } + }, } pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); pending_outbound_blinding_points.push(htlc.blinding_point); } let holding_cell_htlc_update_count = self.context.holding_cell_htlc_updates.len(); - let mut holding_cell_skimmed_fees: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); - let mut holding_cell_blinding_points: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); - let mut holding_cell_failure_attribution_data: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_skimmed_fees: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_blinding_points: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_attribution_data: Vec> = + Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); (holding_cell_htlc_update_count as u64).write(writer)?; for update in self.context.holding_cell_htlc_updates.iter() { match update { &HTLCUpdateAwaitingACK::AddHTLC { - ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, - blinding_point, skimmed_fee_msat, + ref amount_msat, + ref cltv_expiry, + ref payment_hash, + ref source, + ref onion_routing_packet, + blinding_point, + skimmed_fee_msat, } => { 0u8.write(writer)?; amount_msat.write(writer)?; @@ -12417,10 +12608,17 @@ 
where holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => { + &HTLCUpdateAwaitingACK::ClaimHTLC { + ref payment_preimage, + ref htlc_id, + ref attribution_data, + } => { 1u8.write(writer)?; payment_preimage.write(writer)?; htlc_id.write(writer)?; + + // Store the attribution data for later writing. + holding_cell_attribution_data.push(attribution_data.as_ref()); }, &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => { 2u8.write(writer)?; @@ -12428,10 +12626,12 @@ where err_packet.data.write(writer)?; // Store the attribution data for later writing. - holding_cell_failure_attribution_data.push(err_packet.attribution_data.as_ref()); - } + holding_cell_attribution_data.push(err_packet.attribution_data.as_ref()); + }, &HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code, sha256_of_onion + htlc_id, + failure_code, + sha256_of_onion, } => { // We don't want to break downgrading by adding a new variant, so write a dummy // `::FailHTLC` variant and write the real malformed error as an optional TLV. @@ -12443,8 +12643,8 @@ where // Push 'None' attribution data for FailMalformedHTLC, because FailMalformedHTLC uses the same // type 2 and is deserialized as a FailHTLC. - holding_cell_failure_attribution_data.push(None); - } + holding_cell_attribution_data.push(None); + }, } } @@ -12464,7 +12664,9 @@ where } (self.context.monitor_pending_failures.len() as u64).write(writer)?; - for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() { + for &(ref htlc_source, ref payment_hash, ref fail_reason) in + self.context.monitor_pending_failures.iter() + { htlc_source.write(writer)?; payment_hash.write(writer)?; fail_reason.write(writer)?; @@ -12472,7 +12674,9 @@ where if self.funding.is_outbound() { self.context.pending_update_fee.map(|(a, _)| a).write(writer)?; - } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee { + } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = + self.context.pending_update_fee + { Some(feerate).write(writer)?; } else { // As for inbound HTLCs, if the update was only announced and never committed in a @@ -12517,7 +12721,7 @@ where info.fee_proportional_millionths.write(writer)?; info.cltv_expiry_delta.write(writer)?; }, - None => 0u8.write(writer)? + None => 0u8.write(writer)?, } self.funding.channel_transaction_parameters.write(writer)?; @@ -12537,33 +12741,58 @@ where // older clients fail to deserialize this channel at all. If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type // out at all. - let chan_type = if self.funding.get_channel_type() != &ChannelTypeFeatures::only_static_remote_key() { - Some(self.funding.get_channel_type()) } else { None }; + let chan_type = + if self.funding.get_channel_type() != &ChannelTypeFeatures::only_static_remote_key() { + Some(self.funding.get_channel_type()) + } else { + None + }; // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to // a different percentage of the channel value then 10%, which older versions of LDK used // to set it to before the percentage was made configurable. 
let serialized_holder_selected_reserve = - if self.funding.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.funding.get_value_satoshis()) - { Some(self.funding.holder_selected_channel_reserve_satoshis) } else { None }; + if self.funding.holder_selected_channel_reserve_satoshis + != get_legacy_default_holder_selected_channel_reserve_satoshis( + self.funding.get_value_satoshis(), + ) { + Some(self.funding.holder_selected_channel_reserve_satoshis) + } else { + None + }; let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config; - old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY; + old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = + MAX_IN_FLIGHT_PERCENT_LEGACY; let serialized_holder_htlc_max_in_flight = - if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.funding.get_value_satoshis(), &old_max_in_flight_percent_config) - { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None }; + if self.context.holder_max_htlc_value_in_flight_msat + != get_holder_max_htlc_value_in_flight_msat( + self.funding.get_value_satoshis(), + &old_max_in_flight_percent_config, + ) { + Some(self.context.holder_max_htlc_value_in_flight_msat) + } else { + None + }; let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted); - let initial_channel_ready_event_emitted = Some(self.context.initial_channel_ready_event_emitted); - let funding_tx_broadcast_safe_event_emitted = Some(self.context.funding_tx_broadcast_safe_event_emitted); + let initial_channel_ready_event_emitted = + Some(self.context.initial_channel_ready_event_emitted); + let funding_tx_broadcast_safe_event_emitted = + Some(self.context.funding_tx_broadcast_safe_event_emitted); // `user_id` used to be a single u64 value. In order to remain backwards compatible with // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore, // we write the high bytes as an option here. 
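
// Illustrative sketch (not part of the patch): the write path above collects one
// `Option<AttributionData>` per matching removed HTLC / holding-cell update into a
// side vector and stores it under a separate TLV (55 for removed HTLCs, 57 for the
// holding cell); the read path zips the vector back onto the rebuilt list in order and
// treats a length mismatch as `DecodeError::InvalidValue`. A minimal sketch of that
// pairing pattern with hypothetical local types; the real code uses LDK's TLV macros.

#[derive(Clone)]
struct AttributionData; // stand-in

struct Claim {
	htlc_id: u64,
	attribution_data: Option<AttributionData>,
}

// Write side: strip the optional payloads into a parallel list.
fn split_for_write(claims: &[Claim]) -> Vec<Option<AttributionData>> {
	claims.iter().map(|claim| claim.attribution_data.clone()).collect()
}

// Read side: zip the parallel list back, erroring if the lengths disagree.
fn merge_after_read(claims: &mut [Claim], side: Vec<Option<AttributionData>>) -> Result<(), ()> {
	// Mirrors the diff: a count mismatch is treated as a decode error.
	if claims.len() != side.len() {
		return Err(());
	}
	for (claim, data) in claims.iter_mut().zip(side) {
		claim.attribution_data = data;
	}
	Ok(())
}
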
let user_id_high_opt = Some((self.context.user_id >> 64) as u64); - let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) }; + let holder_max_accepted_htlcs = + if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { + None + } else { + Some(self.context.holder_max_accepted_htlcs) + }; let mut monitor_pending_update_adds = None; if !self.context.monitor_pending_update_adds.is_empty() { @@ -12617,8 +12846,8 @@ where (51, is_manual_broadcast, option), // Added in 0.0.124 (53, funding_tx_broadcast_safe_event_emitted, option), // Added in 0.0.124 (54, self.pending_funding, optional_vec), // Added in 0.2 - (55, removed_htlc_failure_attribution_data, optional_vec), // Added in 0.2 - (57, holding_cell_failure_attribution_data, optional_vec), // Added in 0.2 + (55, removed_htlc_attribution_data, optional_vec), // Added in 0.2 + (57, holding_cell_attribution_data, optional_vec), // Added in 0.2 (58, self.interactive_tx_signing_session, option), // Added in 0.2 (59, self.funding.minimum_depth_override, option), // Added in 0.2 (60, self.context.historical_scids, optional_vec), // Added in 0.2 @@ -12634,8 +12863,9 @@ where ES::Target: EntropySource, SP::Target: SignerProvider, { - #[rustfmt::skip] - fn read(reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures)) -> Result { + fn read( + reader: &mut R, args: (&'a ES, &'b SP, &'c ChannelTypeFeatures), + ) -> Result { let (entropy_source, signer_provider, our_supported_features) = args; let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); if ver <= 2 { @@ -12654,7 +12884,8 @@ where } let channel_id: ChannelId = Readable::read(reader)?; - let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?; + let channel_state = ChannelState::from_u32(Readable::read(reader)?) + .map_err(|_| DecodeError::InvalidValue)?; let channel_value_satoshis = Readable::read(reader)?; let latest_monitor_update_id = Readable::read(reader)?; @@ -12672,7 +12903,10 @@ where let pending_inbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min( + pending_inbound_htlc_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..pending_inbound_htlc_count { pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -12682,7 +12916,9 @@ where state: match ::read(reader)? { 1 => { let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + InboundHTLCResolution::Resolved { + pending_htlc_status: Readable::read(reader)?, + } } else { Readable::read(reader)? }; @@ -12690,7 +12926,9 @@ where }, 2 => { let resolution = if ver <= 3 { - InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? } + InboundHTLCResolution::Resolved { + pending_htlc_status: Readable::read(reader)?, + } } else { Readable::read(reader)? 
}; @@ -12704,7 +12942,7 @@ where attribution_data: None, }), 1 => InboundHTLCRemovalReason::FailMalformed(Readable::read(reader)?), - 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?), + 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?, None), _ => return Err(DecodeError::InvalidValue), }; InboundHTLCState::LocalRemoved(reason) @@ -12715,7 +12953,10 @@ where } let pending_outbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min( + pending_outbound_htlc_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..pending_outbound_htlc_count { pending_outbound_htlcs.push(OutboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -12762,7 +13003,10 @@ where } let holding_cell_htlc_update_count: u64 = Readable::read(reader)?; - let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2)); + let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min( + holding_cell_htlc_update_count as usize, + DEFAULT_MAX_HTLCS as usize * 2, + )); for _ in 0..holding_cell_htlc_update_count { holding_cell_htlc_updates.push(match ::read(reader)? { 0 => HTLCUpdateAwaitingACK::AddHTLC { @@ -12777,6 +13021,7 @@ where 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, htlc_id: Readable::read(reader)?, + attribution_data: None, }, 2 => HTLCUpdateAwaitingACK::FailHTLC { htlc_id: Readable::read(reader)?, @@ -12800,15 +13045,25 @@ where let monitor_pending_commitment_signed = Readable::read(reader)?; let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut monitor_pending_forwards = Vec::with_capacity(cmp::min( + monitor_pending_forwards_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..monitor_pending_forwards_count { monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; - let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize)); + let mut monitor_pending_failures = Vec::with_capacity(cmp::min( + monitor_pending_failures_count as usize, + DEFAULT_MAX_HTLCS as usize, + )); for _ in 0..monitor_pending_failures_count { - monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)); + monitor_pending_failures.push(( + Readable::read(reader)?, + Readable::read(reader)?, + Readable::read(reader)?, + )); } let pending_update_fee_value: Option = Readable::read(reader)?; @@ -12866,7 +13121,8 @@ where _ => return Err(DecodeError::InvalidValue), }; - let channel_parameters: ChannelTransactionParameters = ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; + let channel_parameters: ChannelTransactionParameters = + ReadableArgs::>::read(reader, Some(channel_value_satoshis))?; let funding_transaction: Option = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -12880,11 +13136,14 @@ where let channel_update_status = Readable::read(reader)?; let pending_update_fee = if let Some(feerate) = pending_update_fee_value { - Some((feerate, if 
channel_parameters.is_outbound_from_holder { - FeeUpdateState::Outbound - } else { - FeeUpdateState::AwaitingRemoteRevokeToAnnounce - })) + Some(( + feerate, + if channel_parameters.is_outbound_from_holder { + FeeUpdateState::Outbound + } else { + FeeUpdateState::AwaitingRemoteRevokeToAnnounce + }, + )) } else { None }; @@ -12892,8 +13151,14 @@ where let mut announcement_sigs = None; let mut target_closing_feerate_sats_per_kw = None; let mut monitor_pending_finalized_fulfills = Some(Vec::new()); - let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis)); - let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config)); + let mut holder_selected_channel_reserve_satoshis = Some( + get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis), + ); + let mut holder_max_htlc_value_in_flight_msat = + Some(get_holder_max_htlc_value_in_flight_msat( + channel_value_satoshis, + &UserConfig::default().channel_handshake_config, + )); // Prior to supporting channel type negotiation, all of our channels were static_remotekey // only, so we default to that if none was written. let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key()); @@ -12928,8 +13193,8 @@ where let mut pending_outbound_blinding_points_opt: Option>> = None; let mut holding_cell_blinding_points_opt: Option>> = None; - let mut removed_htlc_failure_attribution_data: Option>> = None; - let mut holding_cell_failure_attribution_data: Option>> = None; + let mut removed_htlc_attribution_data: Option>> = None; + let mut holding_cell_attribution_data: Option>> = None; let mut malformed_htlcs: Option> = None; let mut monitor_pending_update_adds: Option> = None; @@ -12981,8 +13246,8 @@ where (51, is_manual_broadcast, option), (53, funding_tx_broadcast_safe_event_emitted, option), (54, pending_funding, optional_vec), // Added in 0.2 - (55, removed_htlc_failure_attribution_data, optional_vec), - (57, holding_cell_failure_attribution_data, optional_vec), + (55, removed_htlc_attribution_data, optional_vec), + (57, holding_cell_attribution_data, optional_vec), (58, interactive_tx_signing_session, option), // Added in 0.2 (59, minimum_depth_override, option), // Added in 0.2 (60, historical_scids, optional_vec), // Added in 0.2 @@ -12993,19 +13258,23 @@ where let mut iter = preimages.into_iter(); for htlc in pending_outbound_htlcs.iter_mut() { match &mut htlc.state { - OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(ref mut preimage)) => { + OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success( + ref mut preimage, + )) => { // This variant was initialized like this further above debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); // Flatten and unwrap the preimage; they are always set starting in 0.2. *preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; - } - OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(ref mut preimage)) => { + }, + OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success( + ref mut preimage, + )) => { // This variant was initialized like this further above debug_assert_eq!(preimage, &PaymentPreimage([0u8; 32])); // Flatten and unwrap the preimage; they are always set starting in 0.2. 
*preimage = iter.next().flatten().ok_or(DecodeError::InvalidValue)?; - } - _ => {} + }, + _ => {}, } } // We expect all preimages to be consumed above @@ -13014,7 +13283,9 @@ where } let chan_features = channel_type.unwrap(); - if chan_features.supports_any_optional_bits() || chan_features.requires_unknown_bits_from(&our_supported_features) { + if chan_features.supports_any_optional_bits() + || chan_features.requires_unknown_bits_from(&our_supported_features) + { // If the channel was written by a new version and negotiated with features we don't // understand yet, refuse to read it. return Err(DecodeError::UnknownRequiredFeature); @@ -13040,7 +13311,9 @@ where htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?; } // We expect all skimmed fees to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt { let mut iter = skimmed_fees.into_iter(); @@ -13050,7 +13323,9 @@ where } } // We expect all skimmed fees to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(blinding_pts) = pending_outbound_blinding_points_opt { let mut iter = blinding_pts.into_iter(); @@ -13058,7 +13333,9 @@ where htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?; } // We expect all blinding points to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(blinding_pts) = holding_cell_blinding_points_opt { let mut iter = blinding_pts.into_iter(); @@ -13068,74 +13345,116 @@ where } } // We expect all blinding points to be consumed above - if iter.next().is_some() { return Err(DecodeError::InvalidValue) } + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } } - if let Some(attribution_data_list) = removed_htlc_failure_attribution_data { + if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlc_relay_failures = - pending_inbound_htlcs.iter_mut().filter_map(|status| - if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(ref mut packet)) = &mut status.state { - Some(&mut packet.attribution_data) + pending_inbound_htlcs.iter_mut().filter_map(|status| { + if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { + match reason { + InboundHTLCRemovalReason::FailRelay(ref mut packet) => { + Some(&mut packet.attribution_data) + }, + InboundHTLCRemovalReason::Fulfill(_, ref mut attribution_data) => { + Some(attribution_data) + }, + _ => None, + } } else { None } - ); + }); for attribution_data in attribution_data_list { - *removed_htlc_relay_failures.next().ok_or(DecodeError::InvalidValue)? = attribution_data; + *removed_htlc_relay_failures.next().ok_or(DecodeError::InvalidValue)? 
= + attribution_data; + } + if removed_htlc_relay_failures.next().is_some() { + return Err(DecodeError::InvalidValue); } - if removed_htlc_relay_failures.next().is_some() { return Err(DecodeError::InvalidValue); } } - if let Some(attribution_data_list) = holding_cell_failure_attribution_data { + if let Some(attribution_data_list) = holding_cell_attribution_data { let mut holding_cell_failures = - holding_cell_htlc_updates.iter_mut().filter_map(|upd| - if let HTLCUpdateAwaitingACK::FailHTLC { err_packet: OnionErrorPacket { ref mut attribution_data, .. }, .. } = upd { + holding_cell_htlc_updates.iter_mut().filter_map(|upd| match upd { + HTLCUpdateAwaitingACK::FailHTLC { + err_packet: OnionErrorPacket { ref mut attribution_data, .. }, + .. + } => Some(attribution_data), + HTLCUpdateAwaitingACK::ClaimHTLC { attribution_data, .. } => { Some(attribution_data) - } else { - None - } - ); + }, + _ => None, + }); for attribution_data in attribution_data_list { *holding_cell_failures.next().ok_or(DecodeError::InvalidValue)? = attribution_data; } - if holding_cell_failures.next().is_some() { return Err(DecodeError::InvalidValue); } + if holding_cell_failures.next().is_some() { + return Err(DecodeError::InvalidValue); + } } if let Some(malformed_htlcs) = malformed_htlcs { for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs { - let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| { - if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc { - let matches = *htlc_id == malformed_htlc_id; - if matches { debug_assert!(err_packet.data.is_empty()) } - matches - } else { false } - }).ok_or(DecodeError::InvalidValue)?; + let htlc_idx = holding_cell_htlc_updates + .iter() + .position(|htlc| { + if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc { + let matches = *htlc_id == malformed_htlc_id; + if matches { + debug_assert!(err_packet.data.is_empty()) + } + matches + } else { + false + } + }) + .ok_or(DecodeError::InvalidValue)?; let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id: malformed_htlc_id, failure_code, sha256_of_onion + htlc_id: malformed_htlc_id, + failure_code, + sha256_of_onion, }; - let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc); + let _ = + core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc); } } // If we're restoring this channel for the first time after an upgrade, then we require that the // signer be available so that we can immediately populate the current commitment point. Channel // restoration will fail if this is not possible. 
- let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) { + let holder_commitment_point = match ( + cur_holder_commitment_point_opt, + next_holder_commitment_point_opt, + ) { (Some(current), Some(next)) => HolderCommitmentPoint::Available { - transaction_number: cur_holder_commitment_transaction_number, current, next + transaction_number: cur_holder_commitment_transaction_number, + current, + next, }, (Some(current), _) => HolderCommitmentPoint::PendingNext { - transaction_number: cur_holder_commitment_transaction_number, current, + transaction_number: cur_holder_commitment_transaction_number, + current, }, (_, _) => { let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx) .expect("Must be able to derive the current commitment point upon channel restoration"); - let next = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx) - .expect("Must be able to derive the next commitment point upon channel restoration"); + let next = holder_signer + .get_per_commitment_point( + cur_holder_commitment_transaction_number - 1, + &secp_ctx, + ) + .expect( + "Must be able to derive the next commitment point upon channel restoration", + ); HolderCommitmentPoint::Available { - transaction_number: cur_holder_commitment_transaction_number, current, next, + transaction_number: cur_holder_commitment_transaction_number, + current, + next, } }, }; @@ -13144,7 +13463,8 @@ where funding: FundingScope { value_to_self_msat, counterparty_selected_channel_reserve_satoshis, - holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(), + holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis + .unwrap(), #[cfg(debug_assertions)] holder_max_commitment_tx_output: Mutex::new((0, 0)), @@ -13261,9 +13581,11 @@ where outbound_scid_alias, historical_scids: historical_scids.unwrap(), - funding_tx_broadcast_safe_event_emitted: funding_tx_broadcast_safe_event_emitted.unwrap_or(false), + funding_tx_broadcast_safe_event_emitted: funding_tx_broadcast_safe_event_emitted + .unwrap_or(false), channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), - initial_channel_ready_event_emitted: initial_channel_ready_event_emitted.unwrap_or(true), + initial_channel_ready_event_emitted: initial_channel_ready_event_emitted + .unwrap_or(true), channel_keys_id, @@ -13282,7 +13604,7 @@ where } } -fn duration_since_epoch() -> Option { +pub(crate) fn duration_since_epoch() -> Option { #[cfg(not(feature = "std"))] let now = None; @@ -13916,7 +14238,6 @@ mod tests { } #[test] - #[rustfmt::skip] fn blinding_point_skimmed_fee_malformed_ser() { // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized // properly. 
@@ -13929,23 +14250,71 @@ mod tests { let best_block = BestBlock::from_network(network); let keys_provider = TestKeysInterface::new(&seed, network); - let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let node_b_node_id = + PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); let features = channelmanager::provided_init_features(&config); let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new( - &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger - ).unwrap(); + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &features, + 10000000, + 100000, + 42, + &config, + 0, + 42, + None, + &logger, + ) + .unwrap(); let mut inbound_chan = InboundV1Channel::<&TestKeysInterface>::new( - &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), - &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network), &&logger).unwrap(), 7, &config, 0, &&logger, false - ).unwrap(); - outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(&&logger).unwrap(), &config.channel_handshake_limits, &features).unwrap(); - let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut { - value: Amount::from_sat(10000000), script_pubkey: outbound_chan.funding.get_funding_redeemscript(), - }]}; - let funding_outpoint = OutPoint{ txid: tx.compute_txid(), index: 0 }; - let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap(); - let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) { + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_channel_type_features(&config), + &features, + &outbound_chan + .get_open_channel(ChainHash::using_genesis_block(network), &&logger) + .unwrap(), + 7, + &config, + 0, + &&logger, + false, + ) + .unwrap(); + outbound_chan + .accept_channel( + &inbound_chan.get_accept_channel_message(&&logger).unwrap(), + &config.channel_handshake_limits, + &features, + ) + .unwrap(); + let tx = Transaction { + version: Version::ONE, + lock_time: LockTime::ZERO, + input: Vec::new(), + output: vec![TxOut { + value: Amount::from_sat(10000000), + script_pubkey: outbound_chan.funding.get_funding_redeemscript(), + }], + }; + let funding_outpoint = OutPoint { txid: tx.compute_txid(), index: 0 }; + let funding_created = outbound_chan + .get_funding_created(tx.clone(), funding_outpoint, false, &&logger) + .map_err(|_| ()) + .unwrap() + .unwrap(); + let mut chan = match inbound_chan.funding_created( + &funding_created, + best_block, + &&keys_provider, + &&logger, + ) { Ok((chan, _, _)) => chan, Err((_, e)) => panic!("{}", e), }; @@ -13953,11 +14322,15 @@ mod tests { let dummy_htlc_source = HTLCSource::OutboundRoute { path: Path { hops: vec![RouteHop { - pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(), - node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0, - cltv_expiry_delta: 0, maybe_announced_channel: false, + pubkey: test_utils::pubkey(2), + channel_features: ChannelFeatures::empty(), + node_features: NodeFeatures::empty(), + short_channel_id: 0, + fee_msat: 0, + cltv_expiry_delta: 0, + maybe_announced_channel: false, }], - blinded_tail: None + 
blinded_tail: None, }, session_priv: test_utils::privkey(42), first_hop_htlc_msat: 0, @@ -13994,8 +14367,8 @@ mod tests { onion_routing_packet: msgs::OnionPacket { version: 0, public_key: Ok(test_utils::pubkey(1)), - hop_data: [0; 20*65], - hmac: [0; 32] + hop_data: [0; 20 * 65], + hmac: [0; 32], }, skimmed_fee_msat: None, blinding_point: None, @@ -14003,14 +14376,21 @@ mod tests { let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), htlc_id: 0, + attribution_data: None, }; let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC { - htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } - }; - let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - sha256_of_onion: [0; 32], + htlc_id, + err_packet: msgs::OnionErrorPacket { + data: vec![42], + attribution_data: Some(AttributionData::new()), + }, }; + let dummy_holding_cell_malformed_htlc = + |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { + htlc_id, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], + }; let mut holding_cell_htlc_updates = Vec::with_capacity(12); for i in 0..12 { if i % 5 == 0 { @@ -14020,11 +14400,16 @@ mod tests { } else if i % 5 == 2 { let mut dummy_add = dummy_holding_cell_add_htlc.clone(); if let HTLCUpdateAwaitingACK::AddHTLC { - ref mut blinding_point, ref mut skimmed_fee_msat, .. - } = &mut dummy_add { + ref mut blinding_point, + ref mut skimmed_fee_msat, + .. + } = &mut dummy_add + { *blinding_point = Some(test_utils::pubkey(42 + i)); *skimmed_fee_msat = Some(42); - } else { panic!() } + } else { + panic!() + } holding_cell_htlc_updates.push(dummy_add); } else if i % 5 == 3 { holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64)); @@ -14037,9 +14422,12 @@ mod tests { // Encode and decode the channel and ensure that the HTLCs within are the same. let encoded_chan = chan.encode(); let mut s = crate::io::Cursor::new(&encoded_chan); - let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); + let mut reader = + crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64); let features = channelmanager::provided_channel_type_features(&config); - let decoded_chan = FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)).unwrap(); + let decoded_chan = + FundedChannel::read(&mut reader, (&&keys_provider, &&keys_provider, &features)) + .unwrap(); assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs); assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates); } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5c39d503a6b..fb17859fda4 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -56,14 +56,15 @@ use crate::events::{ InboundChannelFunds, PaymentFailureReason, ReplayEvent, }; use crate::events::{FundingInfo, PaidBolt12Invoice}; +use crate::ln::onion_utils::process_onion_success; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. 
-use crate::ln::channel::PendingV2Channel; use crate::ln::channel::{ self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel, OutboundV1Channel, ReconnectionMsg, ShutdownResult, UpdateFulfillCommitFetch, WithChannelContext, }; +use crate::ln::channel::{duration_since_epoch, PendingV2Channel}; use crate::ln::channel_state::ChannelDetails; use crate::ln::inbound_payment; use crate::ln::msgs; @@ -76,6 +77,7 @@ use crate::ln::onion_payment::{ decode_incoming_update_add_htlc_onion, invalid_payment_err_data, HopConnector, InboundHTLCErr, NextPacketDetails, }; +use crate::ln::onion_utils::AttributionData; use crate::ln::onion_utils::{self}; use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::ln::our_peer_storage::EncryptedOurPeerStorage; @@ -7515,7 +7517,6 @@ where self.claim_payment_internal(payment_preimage, true); } - #[rustfmt::skip] fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); @@ -7523,7 +7524,10 @@ where let (sources, claiming_payment) = { let res = self.claimable_payments.lock().unwrap().begin_claiming_payment( - payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret, + payment_hash, + &self.node_signer, + &self.logger, + &self.inbound_payment_id_secret, custom_tlvs_known, ); @@ -7531,13 +7535,21 @@ where Ok((htlcs, payment_info)) => (htlcs, payment_info), Err(htlcs) => { for htlc in htlcs { - let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); + let reason = self.get_htlc_fail_reason_from_failure_code( + FailureCode::InvalidOnionPayload(None), + &htlc, + ); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let receiver = HTLCHandlingFailureType::Receive { payment_hash }; - self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); + self.fail_htlc_backwards_internal( + &source, + &payment_hash, + &reason, + receiver, + ); } return; - } + }, } }; debug_assert!(!sources.is_empty()); @@ -7573,7 +7585,10 @@ where mem::drop(per_peer_state); if sources.is_empty() || expected_amt_msat.is_none() { self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); - log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); + log_info!( + self.logger, + "Attempted to claim an incomplete payment which no longer had any available HTLCs!" 
+ ); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { @@ -7583,18 +7598,21 @@ where return; } if valid_mpp { - let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| { - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { - Some(MPPClaimHTLCSource { - counterparty_node_id: cp_id, - funding_txo: htlc.prev_hop.outpoint, - channel_id: htlc.prev_hop.channel_id, - htlc_id: htlc.prev_hop.htlc_id, - }) - } else { - None - } - }).collect(); + let mpp_parts: Vec<_> = sources + .iter() + .filter_map(|htlc| { + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + Some(MPPClaimHTLCSource { + counterparty_node_id: cp_id, + funding_txo: htlc.prev_hop.outpoint, + channel_id: htlc.prev_hop.channel_id, + htlc_id: htlc.prev_hop.htlc_id, + }) + } else { + None + } + }) + .collect(); let pending_mpp_claim_ptr_opt = if sources.len() > 1 { let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len()); for part in mpp_parts.iter() { @@ -7612,32 +7630,56 @@ where }; let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment }); for htlc in sources { - let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| - if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { - let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) - } else { - None - } - ); + let this_mpp_claim = + pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| { + if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { + let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); + Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) + } else { + None + } + }); let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)), } }); + + // Create new attribution data as the final hop. Always report a zero hold time, because reporting a + // non-zero value will not make a difference in the penalty that may be applied by the sender. 
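// The comment above describes the accumulation scheme only briefly, so here is a deliberately
// simplified, self-contained model of it. This is NOT LDK's AttributionData wire format (the real
// structure is fixed-size and carries per-hop HMACs) and `toy_crypt`, `hop_keys` and the byte-wide
// "secrets" below are invented purely for illustration: the recipient records a zero hold time and
// encrypts with its shared secret, each forwarding hop shifts the data, records its own hold time
// and encrypts again, and the sender (who knows every shared secret) peels the layers back off.
fn toy_crypt(data: &mut [u8], key: u8) {
    // Stand-in for the per-hop stream cipher: a reversible XOR with a one-byte "key".
    for b in data.iter_mut() {
        *b ^= key;
    }
}

fn main() {
    // Shared secrets in sender -> recipient order (assumed values for the sketch).
    let hop_keys = [0x11u8, 0x22, 0x33];

    // Final hop: start from empty data, record a zero hold time, encrypt with its secret.
    let mut data: Vec<u8> = Vec::new();
    data.insert(0, 0); // the recipient always reports 0
    toy_crypt(&mut data, hop_keys[2]);

    // Each forwarding hop, walking back toward the sender, shifts the payload to make room for
    // its own slot, records its hold time, and encrypts the whole thing again.
    for (key, hold_time) in [(hop_keys[1], 40u8), (hop_keys[0], 7u8)] {
        data.insert(0, 0); // analogous to shift_right(): open a slot at the front
        data[0] = hold_time; // analogous to update(): record this hop's hold time
        toy_crypt(&mut data, key); // analogous to crypt(): re-encrypt with this hop's secret
    }

    // The sender peels the layers in forward order and reads out one hold time per hop.
    let mut recovered = Vec::new();
    for (i, key) in hop_keys.iter().enumerate() {
        toy_crypt(&mut data, *key);
        recovered.push(data[i]);
    }
    assert_eq!(recovered, vec![7, 40, 0]);
}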
+ let mut attribution_data = AttributionData::new(); + attribution_data.update(&[], &htlc.prev_hop.incoming_packet_shared_secret, 0); + attribution_data.crypt(&htlc.prev_hop.incoming_packet_shared_secret); + self.claim_funds_from_hop( - htlc.prev_hop, payment_preimage, payment_info.clone(), + htlc.prev_hop, + payment_preimage, + payment_info.clone(), + Some(attribution_data), |_, definitely_duplicate| { - debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); - (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker) - } + debug_assert!( + !definitely_duplicate, + "We shouldn't claim duplicatively from a payment" + ); + ( + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim: this_mpp_claim, + }), + raa_blocker, + ) + }, ); } } else { for htlc in sources { - let err_data = invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); + let err_data = + invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data); + let reason = HTLCFailReason::reason( + LocalHTLCFailureReason::IncorrectPaymentDetails, + err_data, + ); let receiver = HTLCHandlingFailureType::Receive { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -7658,7 +7700,8 @@ where ) -> (Option, Option), >( &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: Option, + completion_action: ComplFunc, ) { let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| { let short_to_chan_info = self.short_to_chan_info.read().unwrap(); @@ -7671,15 +7714,24 @@ where channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, }; - self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action) + self.claim_mpp_part( + htlc_source, + payment_preimage, + payment_info, + attribution_data, + completion_action, + ) } - #[rustfmt::skip] fn claim_mpp_part< - ComplFunc: FnOnce(Option, bool) -> (Option, Option) + ComplFunc: FnOnce( + Option, + bool, + ) -> (Option, Option), >( &self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: Option, + completion_action: ComplFunc, ) { //TODO: Delay the claimed_funds relaying just like we do outbound relay! @@ -7701,33 +7753,59 @@ where // Note here that `peer_state_opt` is always `Some` if `prev_hop.counterparty_node_id` is // `Some`. This is relied on in the closed-channel case below. 
- let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map( - |counterparty_node_id| per_peer_state.get(counterparty_node_id) - .map(|peer_mutex| peer_mutex.lock().unwrap()) - .expect(MISSING_MON_ERROR) - ); + let mut peer_state_opt = + prev_hop.counterparty_node_id.as_ref().map(|counterparty_node_id| { + per_peer_state + .get(counterparty_node_id) + .map(|peer_mutex| peer_mutex.lock().unwrap()) + .expect(MISSING_MON_ERROR) + }); if let Some(peer_state_lock) = peer_state_opt.as_mut() { let peer_state = &mut **peer_state_lock; - if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(chan_id) { + if let hash_map::Entry::Occupied(mut chan_entry) = + peer_state.channel_by_id.entry(chan_id) + { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger); + let fulfill_res = chan.get_update_fulfill_htlc_and_commit( + prev_hop.htlc_id, + payment_preimage, + payment_info, + attribution_data, + &&logger, + ); match fulfill_res { UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => { - let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false); + let (action_opt, raa_blocker_opt) = + completion_action(Some(htlc_value_msat), false); if let Some(action) = action_opt { log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}", chan_id, action); - peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); + peer_state + .monitor_update_blocked_actions + .entry(chan_id) + .or_insert(Vec::new()) + .push(action); } if let Some(raa_blocker) = raa_blocker_opt { - peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker); + peer_state + .actions_blocking_raa_monitor_updates + .entry(chan_id) + .or_insert_with(Vec::new) + .push(raa_blocker); } - handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt, - peer_state, per_peer_state, chan); - } + handle_new_monitor_update!( + self, + prev_hop.funding_txo, + monitor_update, + peer_state_opt, + peer_state, + per_peer_state, + chan + ); + }, UpdateFulfillCommitFetch::DuplicateClaim {} => { let (action_opt, raa_blocker_opt) = completion_action(None, true); if let Some(raa_blocker) = raa_blocker_opt { @@ -7739,8 +7817,14 @@ where // // In any other case, the RAA blocker must still be present and // blocking RAAs. - debug_assert!(during_init || - peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker)); + debug_assert!( + during_init + || peer_state + .actions_blocking_raa_monitor_updates + .get(&chan_id) + .unwrap() + .contains(&raa_blocker) + ); } let action = if let Some(action) = action_opt { action @@ -7755,8 +7839,10 @@ where if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, downstream_funding_outpoint: _, - blocking_action: blocker, downstream_channel_id: channel_id, - } = action { + blocking_action: blocker, + downstream_channel_id: channel_id, + } = action + { if let Some(peer_state_mtx) = per_peer_state.get(&node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); if let Some(blockers) = peer_state @@ -7769,7 +7855,9 @@ where // which case we need to only remove the one // blocker which was added duplicatively. 
let first_blocker = !found_blocker; - if *iter == blocker { found_blocker = true; } + if *iter == blocker { + found_blocker = true; + } *iter != blocker || !first_blocker }); debug_assert!(found_blocker); @@ -7777,7 +7865,10 @@ where } else { debug_assert!(false); } - } else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) { + } else if matches!( + action, + MonitorUpdateCompletionAction::PaymentClaimed { .. } + ) { debug_assert!(during_init, "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)"); mem::drop(per_peer_state); @@ -7787,7 +7878,7 @@ where "Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)"); return; }; - } + }, } } return; @@ -7802,10 +7893,14 @@ where payment_preimage, ); } - let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above"); - let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some"); + let counterparty_node_id = + prev_hop.counterparty_node_id.expect("Checked immediately above"); + let mut peer_state = peer_state_opt + .expect("peer_state_opt is always Some when the counterparty_node_id is Some"); - let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) { + let update_id = if let Some(latest_update_id) = + peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) + { *latest_update_id = latest_update_id.saturating_add(1); *latest_update_id } else { @@ -7833,7 +7928,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (action_opt, raa_blocker_opt) = completion_action(None, false); if let Some(raa_blocker) = raa_blocker_opt { - peer_state.actions_blocking_raa_monitor_updates + peer_state + .actions_blocking_raa_monitor_updates .entry(prev_hop.channel_id) .or_default() .push(raa_blocker); @@ -7842,17 +7938,37 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ // Given the fact that we're in a bit of a weird edge case, its worth hashing the preimage // to include the `payment_hash` in the log metadata here. let payment_hash = payment_preimage.into(); - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash)); + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(chan_id), + Some(payment_hash), + ); if let Some(action) = action_opt { - log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}", - chan_id, action); - peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); + log_trace!( + logger, + "Tracking monitor update completion action for closed channel {}: {:?}", + chan_id, + action + ); + peer_state + .monitor_update_blocked_actions + .entry(chan_id) + .or_insert(Vec::new()) + .push(action); } handle_new_monitor_update!( - self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, - counterparty_node_id, chan_id, POST_CHANNEL_CLOSE + self, + prev_hop.funding_txo, + preimage_update, + peer_state, + peer_state, + per_peer_state, + counterparty_node_id, + chan_id, + POST_CHANNEL_CLOSE ); } @@ -7865,9 +7981,16 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ forwarded_htlc_value_msat: Option, skimmed_fee_msat: Option, from_onchain: bool, startup_replay: bool, next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option, + attribution_data: Option<&AttributionData>, send_timestamp: Option, ) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, bolt12_invoice, .. } => { + // Extract the hold times for this fulfilled HTLC, if available. + if let Some(attribution_data) = attribution_data { + let _ = process_onion_success(&self.secp_ctx, &self.logger, &path, + &session_priv, attribution_data.clone()); + } + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire), "We don't support claim_htlc claims during startup - monitors may not be available yet"); debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey); @@ -7884,7 +8007,31 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let prev_user_channel_id = hop_data.user_channel_id; let prev_node_id = hop_data.counterparty_node_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - self.claim_funds_from_hop(hop_data, payment_preimage, None, + + // If attribution data was received from downstream, we shift it and get it ready for adding our hold + // time. + let mut attribution_data = attribution_data + .map_or(AttributionData::new(), |attribution_data| { + let mut attribution_data = attribution_data.clone(); + + attribution_data.shift_right(); + + attribution_data + }); + + // Obtain hold time, if available. + let now = duration_since_epoch(); + let hold_time = if let (Some(timestamp), Some(now)) = (send_timestamp, now) { + u32::try_from(now.saturating_sub(timestamp).as_millis()).unwrap_or(u32::MAX) + } else { + 0 + }; + + // Finish attribution data by adding our hold time and crypting it. + attribution_data.update(&[], &hop_data.incoming_packet_shared_secret, hold_time); + attribution_data.crypt(&hop_data.incoming_packet_shared_secret); + + self.claim_funds_from_hop(hop_data, payment_preimage, None, Some(attribution_data), |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = Some(EventUnblockedChannel { counterparty_node_id: next_channel_counterparty_node_id, @@ -9424,17 +9571,23 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ Ok(()) } - #[rustfmt::skip] - fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { + fn internal_update_fulfill_htlc( + &self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC, + ) -> Result<(), MsgHandleErrInternal> { let funding_txo; let next_user_channel_id; - let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = { + let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = { let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| { - debug_assert!(false); - MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) - })?; + let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close( + format!( + "Can't find a peer matching the passed counterparty node_id {}", + counterparty_node_id + ), + msg.channel_id, + ) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -9467,9 +9620,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), - Some(forwarded_htlc_value), skimmed_fee_msat, false, false, *counterparty_node_id, - funding_txo, msg.channel_id, Some(next_user_channel_id), + self.claim_funds_internal( + htlc_source, + msg.payment_preimage.clone(), + Some(forwarded_htlc_value), + skimmed_fee_msat, + false, + false, + *counterparty_node_id, + funding_txo, + msg.channel_id, + Some(next_user_channel_id), + msg.attribution_data.as_ref(), + send_timestamp, ); Ok(()) @@ -10269,62 +10432,115 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } /// Process pending events from the [`chain::Watch`], returning whether any events were processed. - #[rustfmt::skip] fn process_pending_monitor_events(&self) -> bool { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { + for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in + pending_monitor_events.drain(..) + { for monitor_event in monitor_events.drain(..) 
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { - let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash)); + let logger = WithContext::from( + &self.logger, + Some(counterparty_node_id), + Some(channel_id), + Some(htlc_update.payment_hash), + ); if let Some(preimage) = htlc_update.payment_preimage { - log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage); + log_trace!( + logger, + "Claiming HTLC with preimage {} from our monitor", + preimage + ); + // Claim the funds from the previous hop, if there is one. Because this is in response to a + // chain event, no attribution data is available. self.claim_funds_internal( - htlc_update.source, preimage, - htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true, - false, counterparty_node_id, funding_outpoint, channel_id, None, + htlc_update.source, + preimage, + htlc_update.htlc_value_satoshis.map(|v| v * 1000), + None, + true, + false, + counterparty_node_id, + funding_outpoint, + channel_id, + None, + None, + None, ); } else { - log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); + log_trace!( + logger, + "Failing HTLC with hash {} from our monitor", + &htlc_update.payment_hash + ); let failure_reason = LocalHTLCFailureReason::OnChainTimeout; - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; let reason = HTLCFailReason::from_failure_code(failure_reason); - self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); + self.fail_htlc_backwards_internal( + &htlc_update.source, + &htlc_update.payment_hash, + &reason, + receiver, + ); } }, - MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => { + MonitorEvent::HolderForceClosed(_) + | MonitorEvent::HolderForceClosedWithInfo { .. } => { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) { - let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event { + if let hash_map::Entry::Occupied(mut chan_entry) = + peer_state.channel_by_id.entry(channel_id) + { + let reason = if let MonitorEvent::HolderForceClosedWithInfo { + reason, + .. 
+ } = monitor_event + { reason } else { - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } + ClosureReason::HolderForceClosed { + broadcasted_latest_txn: Some(true), + } }; - let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone()); - let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res); + let mut shutdown_res = + chan_entry.get_mut().force_shutdown(false, reason.clone()); + let chan = remove_channel_entry!( + self, + peer_state, + chan_entry, + shutdown_res + ); failed_channels.push(shutdown_res); if let Some(funded_chan) = chan.as_funded() { - if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) { - let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap(); - pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + if let Ok(update) = + self.get_channel_update_for_broadcast(funded_chan) + { + let mut pending_broadcast_messages = + self.pending_broadcast_messages.lock().unwrap(); + pending_broadcast_messages.push( + MessageSendEvent::BroadcastChannelUpdate { + msg: update, + }, + ); } pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id: funded_chan.context.channel_id(), - data: reason.to_string() - }) + data: reason.to_string(), + }), }, }); } @@ -10332,7 +10548,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, MonitorEvent::Completed { channel_id, monitor_update_id, .. } => { - self.channel_monitor_updated(&channel_id, monitor_update_id, &counterparty_node_id); + self.channel_monitor_updated( + &channel_id, + monitor_update_id, + &counterparty_node_id, + ); }, } } @@ -14513,102 +14733,145 @@ where MR::Target: MessageRouter, L::Target: Logger, { - #[rustfmt::skip] - fn read(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result { + fn read( + reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, + ) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let chain_hash: ChainHash = Readable::read(reader)?; let best_block_height: u32 = Readable::read(reader)?; let best_block_hash: BlockHash = Readable::read(reader)?; - let empty_peer_state = || { - PeerState { - channel_by_id: new_hash_map(), - inbound_channel_request_by_id: new_hash_map(), - latest_features: InitFeatures::empty(), - pending_msg_events: Vec::new(), - in_flight_monitor_updates: BTreeMap::new(), - monitor_update_blocked_actions: BTreeMap::new(), - actions_blocking_raa_monitor_updates: BTreeMap::new(), - closed_channel_monitor_update_ids: BTreeMap::new(), - peer_storage: Vec::new(), - is_connected: false, - } + let empty_peer_state = || PeerState { + channel_by_id: new_hash_map(), + inbound_channel_request_by_id: new_hash_map(), + latest_features: InitFeatures::empty(), + pending_msg_events: Vec::new(), + in_flight_monitor_updates: BTreeMap::new(), + monitor_update_blocked_actions: BTreeMap::new(), + actions_blocking_raa_monitor_updates: BTreeMap::new(), + closed_channel_monitor_update_ids: BTreeMap::new(), + peer_storage: Vec::new(), + is_connected: false, }; let mut failed_htlcs = Vec::new(); let channel_count: u64 = Readable::read(reader)?; let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128)); - let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, 
MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex>)>())); + let mut per_peer_state = hash_map_with_capacity(cmp::min( + channel_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex>)>(), + )); let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); for _ in 0..channel_count { - let mut channel: FundedChannel = FundedChannel::read(reader, ( - &args.entropy_source, &args.signer_provider, &provided_channel_type_features(&args.default_config) - ))?; + let mut channel: FundedChannel = FundedChannel::read( + reader, + ( + &args.entropy_source, + &args.signer_provider, + &provided_channel_type_features(&args.default_config), + ), + )?; let logger = WithChannelContext::from(&args.logger, &channel.context, None); let channel_id = channel.context.channel_id(); channel_id_set.insert(channel_id); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) { - if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || - channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || - channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || - channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + || channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + || channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + || channel.context.get_latest_monitor_update_id() + < monitor.get_latest_update_id() + { // But if the channel is behind of the monitor, close the channel: - log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); + log_error!( + logger, + "A ChannelManager is stale compared to the current ChannelMonitor!" 
+ ); log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); - if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + if channel.context.get_latest_monitor_update_id() + < monitor.get_latest_update_id() + { log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); } - if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() { + if channel.get_cur_holder_commitment_transaction_number() + > monitor.get_cur_holder_commitment_number() + { log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.", &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number()); } - if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() { + if channel.get_revoked_counterparty_commitment_transaction_number() + > monitor.get_min_seen_secret() + { log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.", &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number()); } - if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() { + if channel.get_cur_counterparty_commitment_transaction_number() + > monitor.get_cur_counterparty_commitment_number() + { log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let mut shutdown_result = channel.context.force_shutdown(&channel.funding, true, ClosureReason::OutdatedChannelManager); + let mut shutdown_result = channel.context.force_shutdown( + &channel.funding, + true, + ClosureReason::OutdatedChannelManager, + ); if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } - if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update { + if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = + shutdown_result.monitor_update + { // Our channel information is out of sync with the `ChannelMonitor`, so // force the update to use the `ChannelMonitor`'s update_id for the close // update. 
let latest_update_id = monitor.get_latest_update_id().saturating_add(1); update.update_id = latest_update_id; - per_peer_state.entry(counterparty_node_id) + per_peer_state + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock().unwrap() - .closed_channel_monitor_update_ids.entry(channel_id) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); + .lock() + .unwrap() + .closed_channel_monitor_update_ids + .entry(channel_id) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); - close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update - }); + close_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update, + }, + ); } failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs); - channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::OutdatedChannelManager, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, None)); + channel_closures.push_back(( + events::Event::ChannelClosed { + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), + reason: ClosureReason::OutdatedChannelManager, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.funding.get_value_satoshis()), + channel_funding_txo: channel.funding.get_funding_txo(), + last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), + }, + None, + )); for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() { let mut found_htlc = false; for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() { - if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; } + if *channel_htlc_source == monitor_htlc_source { + found_htlc = true; + break; + } } if !found_htlc { // If we have some HTLCs in the channel which are not present in the newer @@ -14618,50 +14881,91 @@ where // claim update ChannelMonitor updates were persisted prior to persising // the ChannelMonitor update for the forward leg, so attempting to fail the // backwards leg of the HTLC will simply be rejected. 
- let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash)); + let logger = WithChannelContext::from( + &args.logger, + &channel.context, + Some(*payment_hash), + ); log_info!(logger, "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager", &channel.context.channel_id(), &payment_hash); - failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id())); + failed_htlcs.push(( + channel_htlc_source.clone(), + *payment_hash, + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + )); } } } else { - channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id()); + channel.on_startup_drop_completed_blocked_mon_updates_through( + &logger, + monitor.get_latest_update_id(), + ); log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates", &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending()); if let Some(short_channel_id) = channel.funding.get_short_channel_id() { - short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + short_to_chan_info.insert( + short_channel_id, + ( + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + ), + ); } for short_channel_id in channel.context.historical_scids() { - short_to_chan_info.insert(*short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); + short_to_chan_info.insert( + *short_channel_id, + ( + channel.context.get_counterparty_node_id(), + channel.context.channel_id(), + ), + ); } - per_peer_state.entry(channel.context.get_counterparty_node_id()) + per_peer_state + .entry(channel.context.get_counterparty_node_id()) .or_insert_with(|| Mutex::new(empty_peer_state())) - .get_mut().unwrap() - .channel_by_id.insert(channel.context.channel_id(), Channel::from(channel)); + .get_mut() + .unwrap() + .channel_by_id + .insert(channel.context.channel_id(), Channel::from(channel)); } } else if channel.is_awaiting_initial_mon_persist() { // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. 
- let _ = channel.context.force_shutdown(&channel.funding, false, ClosureReason::DisconnectedPeer); - channel_closures.push_back((events::Event::ChannelClosed { - channel_id: channel.context.channel_id(), - user_channel_id: channel.context.get_user_id(), - reason: ClosureReason::DisconnectedPeer, - counterparty_node_id: Some(channel.context.get_counterparty_node_id()), - channel_capacity_sats: Some(channel.funding.get_value_satoshis()), - channel_funding_txo: channel.funding.get_funding_txo(), - last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), - }, None)); + let _ = channel.context.force_shutdown( + &channel.funding, + false, + ClosureReason::DisconnectedPeer, + ); + channel_closures.push_back(( + events::Event::ChannelClosed { + channel_id: channel.context.channel_id(), + user_channel_id: channel.context.get_user_id(), + reason: ClosureReason::DisconnectedPeer, + counterparty_node_id: Some(channel.context.get_counterparty_node_id()), + channel_capacity_sats: Some(channel.funding.get_value_satoshis()), + channel_funding_txo: channel.funding.get_funding_txo(), + last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()), + }, + None, + )); } else { - log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id()); + log_error!( + logger, + "Missing ChannelMonitor for channel {} needed by ChannelManager.", + &channel.context.channel_id() + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); - log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds."); + log_error!( + logger, + " Without the ChannelMonitor we cannot continue without risking funds." 
+ ); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); return Err(DecodeError::InvalidValue); } @@ -14684,12 +14988,15 @@ where if should_queue_fc_update { latest_update_id += 1; } - per_peer_state.entry(counterparty_node_id) + per_peer_state + .entry(counterparty_node_id) .or_insert_with(|| Mutex::new(empty_peer_state())) - .lock().unwrap() - .closed_channel_monitor_update_ids.entry(monitor.channel_id()) - .and_modify(|v| *v = cmp::max(latest_update_id, *v)) - .or_insert(latest_update_id); + .lock() + .unwrap() + .closed_channel_monitor_update_ids + .entry(monitor.channel_id()) + .and_modify(|v| *v = cmp::max(latest_update_id, *v)) + .or_insert(latest_update_id); } if !should_queue_fc_update { @@ -14698,11 +15005,16 @@ where let logger = WithChannelMonitor::from(&args.logger, monitor, None); let channel_id = monitor.channel_id(); - log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed", - &channel_id); + log_info!( + logger, + "Queueing monitor update to ensure missing channel {} is force closed", + &channel_id + ); let monitor_update = ChannelMonitorUpdate { update_id: monitor.get_latest_update_id().saturating_add(1), - updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], + updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { + should_broadcast: true, + }], channel_id: Some(monitor.channel_id()), }; let funding_txo = monitor.get_funding_txo(); @@ -14722,7 +15034,10 @@ where for _ in 0..forward_htlcs_count { let short_channel_id = Readable::read(reader)?; let pending_forwards_count: u64 = Readable::read(reader)?; - let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); + let mut pending_forwards = Vec::with_capacity(cmp::min( + pending_forwards_count as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); for _ in 0..pending_forwards_count { pending_forwards.push(Readable::read(reader)?); } @@ -14730,11 +15045,15 @@ where } let claimable_htlcs_count: u64 = Readable::read(reader)?; - let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); + let mut claimable_htlcs_list = + Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128)); for _ in 0..claimable_htlcs_count { let payment_hash = Readable::read(reader)?; let previous_hops_len: u64 = Readable::read(reader)?; - let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::())); + let mut previous_hops = Vec::with_capacity(cmp::min( + previous_hops_len as usize, + MAX_ALLOC_SIZE / mem::size_of::(), + )); for _ in 0..previous_hops_len { previous_hops.push(::read(reader)?); } @@ -14752,7 +15071,10 @@ where let event_count: u64 = Readable::read(reader)?; let mut pending_events_read: VecDeque<(events::Event, Option)> = - VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option)>())); + VecDeque::with_capacity(cmp::min( + event_count as usize, + MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option)>(), + )); for _ in 0..event_count { match MaybeReadable::read(reader)? { Some(event) => pending_events_read.push_back((event, None)), @@ -14769,7 +15091,7 @@ where // on-startup monitor updates. 
let _: OutPoint = Readable::read(reader)?; let _: ChannelMonitorUpdate = Readable::read(reader)?; - } + }, _ => return Err(DecodeError::InvalidValue), } } @@ -14783,38 +15105,53 @@ where let payment_hash: PaymentHash = Readable::read(reader)?; let logger = WithContext::from(&args.logger, None, None, Some(payment_hash)); let inbound: PendingInboundPayment = Readable::read(reader)?; - log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound); + log_warn!( + logger, + "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", + payment_hash, + inbound + ); } let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; let mut pending_outbound_payments_compat: HashMap = - hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + hash_map_with_capacity(cmp::min( + pending_outbound_payments_count_compat as usize, + MAX_ALLOC_SIZE / 32, + )); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; let payment = PendingOutboundPayment::Legacy { session_privs: hash_set_from_iter([session_priv]), }; if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { - return Err(DecodeError::InvalidValue) + return Err(DecodeError::InvalidValue); }; } // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. - let mut pending_outbound_payments_no_retry: Option>> = None; + let mut pending_outbound_payments_no_retry: Option>> = + None; let mut pending_outbound_payments = None; - let mut pending_intercepted_htlcs: Option> = Some(new_hash_map()); + let mut pending_intercepted_htlcs: Option> = + Some(new_hash_map()); let mut received_network_pubkey: Option = None; let mut fake_scid_rand_bytes: Option<[u8; 32]> = None; let mut probing_cookie_secret: Option<[u8; 32]> = None; let mut claimable_htlc_purposes = None; let mut claimable_htlc_onion_fields = None; let mut pending_claiming_payments = Some(new_hash_map()); - let mut monitor_update_blocked_actions_per_peer: Option>)>> = Some(Vec::new()); + let mut monitor_update_blocked_actions_per_peer: Option>)>> = + Some(Vec::new()); let mut events_override = None; - let mut legacy_in_flight_monitor_updates: Option>> = None; + let mut legacy_in_flight_monitor_updates: Option< + HashMap<(PublicKey, OutPoint), Vec>, + > = None; // We use this one over the legacy since they represent the same data, just with a different // key. We still need to read the legacy one as it's an even TLV. - let mut in_flight_monitor_updates: Option>> = None; + let mut in_flight_monitor_updates: Option< + HashMap<(PublicKey, ChannelId), Vec>, + > = None; let mut decode_update_add_htlcs: Option>> = None; let mut inbound_payment_id_secret = None; let mut peer_storage_dir: Option)>> = None; @@ -14882,7 +15219,8 @@ where return Err(DecodeError::InvalidValue); } if in_flight_monitor_updates.is_none() { - let in_flight_upds = in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); + let in_flight_upds = + in_flight_monitor_updates.get_or_insert_with(|| new_hash_map()); for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds { // All channels with legacy in flight monitor updates are v1 channels. let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); @@ -14966,22 +15304,40 @@ where // Channels that were persisted have to be funded, otherwise they should have been // discarded. 
- let monitor = args.channel_monitors.get(chan_id) + let monitor = args + .channel_monitors + .get(chan_id) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); if let Some(in_flight_upds) = &mut in_flight_monitor_updates { - if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, *chan_id)) { - max_in_flight_update_id = cmp::max(max_in_flight_update_id, - handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds, - monitor, peer_state, logger, "")); + if let Some(mut chan_in_flight_upds) = + in_flight_upds.remove(&(*counterparty_id, *chan_id)) + { + max_in_flight_update_id = cmp::max( + max_in_flight_update_id, + handle_in_flight_updates!( + *counterparty_id, + chan_in_flight_upds, + monitor, + peer_state, + logger, + "" + ), + ); } } - if funded_chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { + if funded_chan.get_latest_unblocked_monitor_update_id() + > max_in_flight_update_id + { // If the channel is ahead of the monitor, return DangerousValue: log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight", chan_id, monitor.get_latest_update_id(), max_in_flight_update_id); - log_error!(logger, " but the ChannelManager is at update_id {}.", funded_chan.get_latest_unblocked_monitor_update_id()); + log_error!( + logger, + " but the ChannelManager is at update_id {}.", + funded_chan.get_latest_unblocked_monitor_update_id() + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -14999,25 +15355,40 @@ where if let Some(in_flight_upds) = in_flight_monitor_updates { for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds { - let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); + let logger = + WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None); if let Some(monitor) = args.channel_monitors.get(&channel_id) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, // creating the neccessary peer_state entries as we go. - let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| { - Mutex::new(empty_peer_state()) - }); + let peer_state_mutex = per_peer_state + .entry(counterparty_id) + .or_insert_with(|| Mutex::new(empty_peer_state())); let mut peer_state = peer_state_mutex.lock().unwrap(); - handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, monitor, - peer_state, logger, "closed "); + handle_in_flight_updates!( + counterparty_id, + chan_in_flight_updates, + monitor, + peer_state, + logger, + "closed " + ); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! 
This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", channel_id); + log_error!( + logger, + " The ChannelMonitor for channel {} is missing.", + channel_id + ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning"); - log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates); + log_error!( + logger, + " Pending in-flight updates are: {:?}", + chan_in_flight_updates + ); return Err(DecodeError::InvalidValue); } } @@ -15028,22 +15399,34 @@ where pending_background_events.reserve(close_background_events.len()); 'each_bg_event: for mut new_event in close_background_events { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, channel_id, update, - } = &mut new_event { + counterparty_node_id, + funding_txo, + channel_id, + update, + } = &mut new_event + { debug_assert_eq!(update.updates.len(), 1); - debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + debug_assert!(matches!( + update.updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); let mut updated_id = false; for pending_event in pending_background_events.iter() { if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: pending_cp, funding_txo: pending_funding, - channel_id: pending_chan_id, update: pending_update, - } = pending_event { + counterparty_node_id: pending_cp, + funding_txo: pending_funding, + channel_id: pending_chan_id, + update: pending_update, + } = pending_event + { let for_same_channel = counterparty_node_id == pending_cp && funding_txo == pending_funding && channel_id == pending_chan_id; if for_same_channel { debug_assert!(update.update_id >= pending_update.update_id); - if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) { + if pending_update.updates.iter().any(|upd| { + matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. }) + }) { // If the background event we're looking at is just // force-closing the channel which already has a pending // force-close update, no need to duplicate it. 
@@ -15054,18 +15437,23 @@ where } } } - let mut per_peer_state = per_peer_state.get(counterparty_node_id) + let mut per_peer_state = per_peer_state + .get(counterparty_node_id) .expect("If we have pending updates for a channel it must have an entry") - .lock().unwrap(); + .lock() + .unwrap(); if updated_id { per_peer_state - .closed_channel_monitor_update_ids.entry(*channel_id) + .closed_channel_monitor_update_ids + .entry(*channel_id) .and_modify(|v| *v = cmp::max(update.update_id, *v)) .or_insert(update.update_id); } - let in_flight_updates = &mut per_peer_state.in_flight_monitor_updates + let in_flight_updates = &mut per_peer_state + .in_flight_monitor_updates .entry(*channel_id) - .or_insert_with(|| (*funding_txo, Vec::new())).1; + .or_insert_with(|| (*funding_txo, Vec::new())) + .1; debug_assert!(!in_flight_updates.iter().any(|upd| upd == update)); in_flight_updates.push(update.clone()); } @@ -15096,9 +15484,17 @@ where } if is_channel_closed { - for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() { - let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); - if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source { + for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); + if let HTLCSource::OutboundRoute { + payment_id, session_priv, path, .. + } = htlc_source + { if path.hops.is_empty() { log_error!(logger, "Got an empty path for a pending payment"); return Err(DecodeError::InvalidValue); @@ -15107,17 +15503,28 @@ where let mut session_priv_bytes = [0; 32]; session_priv_bytes[..].copy_from_slice(&session_priv[..]); pending_outbounds.insert_from_monitor_on_startup( - payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger + payment_id, + htlc.payment_hash, + session_priv_bytes, + &path, + best_block_height, + logger, ); } } - for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() { - let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + for (htlc_source, (htlc, preimage_opt)) in + monitor.get_all_current_outbound_htlcs() + { + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc.payment_hash), + ); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint && - info.prev_htlc_id == prev_hop_data.htlc_id + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id }; // The ChannelMonitor is now responsible for this HTLC's // failure/success and will let us know what its outcome is. If we @@ -15161,7 +15568,13 @@ where } else { true } }); }, - HTLCSource::OutboundRoute { payment_id, session_priv, path, bolt12_invoice, .. } => { + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + .. 
+ } => { if let Some(preimage) = preimage_opt { let pending_events = Mutex::new(pending_events_read); // Note that we set `from_onchain` to "false" here, @@ -15178,8 +15591,17 @@ where channel_id: monitor.channel_id(), counterparty_node_id: path.hops[0].pubkey, }; - pending_outbounds.claim_htlc(payment_id, preimage, bolt12_invoice, session_priv, - path, false, compl_action, &pending_events, &&logger); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + session_priv, + path, + false, + compl_action, + &pending_events, + &&logger, + ); pending_events_read = pending_events.into_inner().unwrap(); } }, @@ -15302,15 +15724,19 @@ where } } - if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() { + if !forward_htlcs.is_empty() + || !decode_update_add_htlcs.is_empty() + || pending_outbounds.needs_abandon() + { // If we have pending HTLCs to forward, assume we either dropped a // `PendingHTLCsForwardable` or the user received it but never processed it as they // shut down before the timer hit. Either way, set the time_forwardable to a small // constant as enough time has likely passed that we should simply handle the forwards // now, or at least after the user gets a chance to reconnect to our peers. - pending_events_read.push_back((events::Event::PendingHTLCsForwardable { - time_forwardable: Duration::from_secs(2), - }, None)); + pending_events_read.push_back(( + events::Event::PendingHTLCsForwardable { time_forwardable: Duration::from_secs(2) }, + None, + )); } let expanded_inbound_key = args.node_signer.get_inbound_payment_key(); @@ -15324,20 +15750,29 @@ where if onion_fields.len() != claimable_htlcs_list.len() { return Err(DecodeError::InvalidValue); } - for (purpose, (onion, (payment_hash, htlcs))) in - purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) + for (purpose, (onion, (payment_hash, htlcs))) in purposes + .into_iter() + .zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter())) { - let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: onion, - }); - if existing_payment.is_some() { return Err(DecodeError::InvalidValue); } + let existing_payment = claimable_payments.insert( + payment_hash, + ClaimablePayment { purpose, htlcs, onion_fields: onion }, + ); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } } } else { - for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) { - let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: None, - }); - if existing_payment.is_some() { return Err(DecodeError::InvalidValue); } + for (purpose, (payment_hash, htlcs)) in + purposes.into_iter().zip(claimable_htlcs_list.into_iter()) + { + let existing_payment = claimable_payments.insert( + payment_hash, + ClaimablePayment { purpose, htlcs, onion_fields: None }, + ); + if existing_payment.is_some() { + return Err(DecodeError::InvalidValue); + } } } } else { @@ -15351,26 +15786,31 @@ where OnionPayload::Invoice { _legacy_hop_data } => { if let Some(hop_data) = _legacy_hop_data { events::PaymentPurpose::Bolt11InvoicePayment { - payment_preimage: - match inbound_payment::verify( - payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger - ) { - Ok((payment_preimage, _)) => payment_preimage, - Err(()) => { - log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment 
hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); - return Err(DecodeError::InvalidValue); - } + payment_preimage: match inbound_payment::verify( + payment_hash, + &hop_data, + 0, + &expanded_inbound_key, + &args.logger, + ) { + Ok((payment_preimage, _)) => payment_preimage, + Err(()) => { + log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash); + return Err(DecodeError::InvalidValue); }, + }, payment_secret: hop_data.payment_secret, } - } else { return Err(DecodeError::InvalidValue); } + } else { + return Err(DecodeError::InvalidValue); + } + }, + OnionPayload::Spontaneous(payment_preimage) => { + events::PaymentPurpose::SpontaneousPayment(*payment_preimage) }, - OnionPayload::Spontaneous(payment_preimage) => - events::PaymentPurpose::SpontaneousPayment(*payment_preimage), }; - claimable_payments.insert(payment_hash, ClaimablePayment { - purpose, htlcs, onion_fields: None, - }); + claimable_payments + .insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None }); } } @@ -15406,7 +15846,7 @@ where let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) { Ok(key) => key, - Err(()) => return Err(DecodeError::InvalidValue) + Err(()) => return Err(DecodeError::InvalidValue), }; if let Some(network_pubkey) = received_network_pubkey { if network_pubkey != our_network_pubkey { @@ -15426,21 +15866,44 @@ where let mut outbound_scid_alias; loop { outbound_scid_alias = fake_scid::Namespace::OutboundAlias - .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source); - if outbound_scid_aliases.insert(outbound_scid_alias) { break; } + .get_fake_scid( + best_block_height, + &chain_hash, + fake_scid_rand_bytes.as_ref().unwrap(), + &args.entropy_source, + ); + if outbound_scid_aliases.insert(outbound_scid_alias) { + break; + } } funded_chan.context.set_outbound_scid_alias(outbound_scid_alias); - } else if !outbound_scid_aliases.insert(funded_chan.context.outbound_scid_alias()) { + } else if !outbound_scid_aliases + .insert(funded_chan.context.outbound_scid_alias()) + { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. - log_error!(logger, "Got duplicate outbound SCID alias; {}", funded_chan.context.outbound_scid_alias()); + log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); return Err(DecodeError::InvalidValue); } if funded_chan.context.is_usable() { - if short_to_chan_info.insert(funded_chan.context.outbound_scid_alias(), (funded_chan.context.get_counterparty_node_id(), *chan_id)).is_some() { + if short_to_chan_info + .insert( + funded_chan.context.outbound_scid_alias(), + (funded_chan.context.get_counterparty_node_id(), *chan_id), + ) + .is_some() + { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. 
- log_error!(logger, "Got duplicate outbound SCID alias; {}", funded_chan.context.outbound_scid_alias()); + log_error!( + logger, + "Got duplicate outbound SCID alias; {}", + funded_chan.context.outbound_scid_alias() + ); return Err(DecodeError::InvalidValue); } } @@ -15455,10 +15918,13 @@ where let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator); - for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() { + for (node_id, monitor_update_blocked_actions) in + monitor_update_blocked_actions_per_peer.unwrap() + { if let Some(peer_state) = per_peer_state.get(&node_id) { for (channel_id, actions) in monitor_update_blocked_actions.iter() { - let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); + let logger = + WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); for action in actions.iter() { if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: @@ -15467,15 +15933,21 @@ where funding_txo: _, channel_id: blocked_channel_id, blocking_action, - }), .. - } = action { + }), + .. + } = action + { if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) { log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", blocked_channel_id); - blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates + blocked_peer_state + .lock() + .unwrap() + .actions_blocking_raa_monitor_updates .entry(*blocked_channel_id) - .or_insert_with(Vec::new).push(blocking_action.clone()); + .or_insert_with(Vec::new) + .push(blocking_action.clone()); } else { // If the channel we were blocking has closed, we don't need to // worry about it - the blocked monitor update should never have @@ -15484,7 +15956,10 @@ where // anymore. } } - if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action { + if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + .. + } = action + { debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue"); } } @@ -15494,7 +15969,8 @@ where // `ChannelManager` was serialized. In that case, we'll run the post-update // actions as soon as we get going. } - peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; + peer_state.lock().unwrap().monitor_update_blocked_actions = + monitor_update_blocked_actions; } else { for actions in monitor_update_blocked_actions.values() { for action in actions.iter() { @@ -15508,7 +15984,12 @@ where // which we ignore here. 
} else { let logger = WithContext::from(&args.logger, Some(node_id), None, None); - log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id); + log_error!( + logger, + "Got blocked actions {:?} without a per-peer-state for {}", + monitor_update_blocked_actions, + node_id + ); return Err(DecodeError::InvalidValue); } } @@ -15518,9 +15999,13 @@ where let best_block = BestBlock::new(best_block_hash, best_block_height); let flow = OffersMessageFlow::new( - chain_hash, best_block, our_network_pubkey, - highest_seen_timestamp, expanded_inbound_key, - secp_ctx.clone(), args.message_router + chain_hash, + best_block, + our_network_pubkey, + highest_seen_timestamp, + expanded_inbound_key, + secp_ctx.clone(), + args.message_router, ); let channel_manager = ChannelManager { @@ -15539,7 +16024,10 @@ where forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), - claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }), + claimable_payments: Mutex::new(ClaimablePayments { + claimable_payments, + pending_claiming_payments: pending_claiming_payments.unwrap(), + }), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), short_to_chan_info: FairRwLock::new(short_to_chan_info), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), @@ -15568,7 +16056,6 @@ where funding_batch_states: Mutex::new(BTreeMap::new()), - pending_broadcast_messages: Mutex::new(Vec::new()), entropy_source: args.entropy_source, @@ -15586,7 +16073,8 @@ where let mut processed_claims: HashSet> = new_hash_set(); for (_, monitor) in args.channel_monitors.iter() { - for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() { + for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() + { if !payment_claims.is_empty() { for payment_claim in payment_claims { if processed_claims.contains(&payment_claim.mpp_parts) { @@ -15602,8 +16090,12 @@ where { let payments = channel_manager.claimable_payments.lock().unwrap(); if !payments.claimable_payments.contains_key(&payment_hash) { - if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) { - if payment.payment_id == payment_claim.claiming_payment.payment_id { + if let Some(payment) = + payments.pending_claiming_payments.get(&payment_hash) + { + if payment.payment_id + == payment_claim.claiming_payment.payment_id + { // If this payment already exists and was marked as // being-claimed then the serialized state must contain all // of the pending `ChannelMonitorUpdate`s required to get @@ -15615,8 +16107,16 @@ where } } - let mut channels_without_preimage = payment_claim.mpp_parts.iter() - .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) + let mut channels_without_preimage = payment_claim + .mpp_parts + .iter() + .map(|htlc_info| { + ( + htlc_info.counterparty_node_id, + htlc_info.funding_txo, + htlc_info.channel_id, + ) + }) .collect::>(); // If we have multiple MPP parts which were received over the same channel, // we only track it once as once we get a preimage durably in the @@ -15637,16 +16137,26 @@ where // preimages eventually timing out from ChannelMonitors to prevent us from // doing so forever. 
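The "only track it once" note above amounts to deduplicating the (counterparty, funding outpoint, channel id) triples collected from the MPP parts. In isolation the step looks like this (self-contained toy; the tuple element types are shortened for brevity and this is not the LDK code itself):

fn dedup_channels<T: Ord>(mut channels: Vec<T>) -> Vec<T> {
    // Sorting first makes duplicates adjacent so `dedup` removes all of them.
    channels.sort_unstable();
    channels.dedup();
    channels
}

fn main() {
    let parts = vec![(1u8, 10u8, 100u8), (2, 20, 200), (1, 10, 100)];
    assert_eq!(dedup_channels(parts), vec![(1, 10, 100), (2, 20, 200)]);
}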
- let claim_found = - channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment( - payment_hash, &channel_manager.node_signer, &channel_manager.logger, - &channel_manager.inbound_payment_id_secret, true, + let claim_found = channel_manager + .claimable_payments + .lock() + .unwrap() + .begin_claiming_payment( + payment_hash, + &channel_manager.node_signer, + &channel_manager.logger, + &channel_manager.inbound_payment_id_secret, + true, ); if claim_found.is_err() { - let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap(); + let mut claimable_payments = + channel_manager.claimable_payments.lock().unwrap(); match claimable_payments.pending_claiming_payments.entry(payment_hash) { hash_map::Entry::Occupied(_) => { - debug_assert!(false, "Entry was added in begin_claiming_payment"); + debug_assert!( + false, + "Entry was added in begin_claiming_payment" + ); return Err(DecodeError::InvalidValue); }, hash_map::Entry::Vacant(entry) => { @@ -15656,22 +16166,38 @@ where } for part in payment_claim.mpp_parts.iter() { - let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| ( - part.counterparty_node_id, part.channel_id, - PendingMPPClaimPointer(Arc::clone(&ptr)) - )); - let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| + let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| { + ( + part.counterparty_node_id, + part.channel_id, + PendingMPPClaimPointer(Arc::clone(&ptr)), + ) + }); + let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| { RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)), } - ); + }); // Note that we don't need to pass the `payment_info` here - its // already (clearly) durably on disk in the `ChannelMonitor` so there's // no need to worry about getting it into others. + // + // We don't encode any attribution data, because the required onion shared secret isn't + // available here. 
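For context on why only `None` can be passed here: on the live claim path the inbound edge still knows the onion shared secret it derived when it accepted the HTLC, and that secret keys both the hold-time HMACs and the encryption layer of the attribution data. A minimal sketch using the `AttributionData` helpers added later in this diff (the wrapper function is hypothetical, not a call site introduced by this change):

fn build_success_attribution(shared_secret: &[u8], hold_time_ms: u32) -> AttributionData {
    let mut attribution_data = AttributionData::new();
    // Record our hold time in slot 0 and add our HMAC chain; for fulfills the HMACed message is
    // empty, so only the shared secret and the hold time go in.
    attribution_data.update(&[], shared_secret, hold_time_ms);
    // Encrypt our layer so that only the original sender, who can re-derive the per-hop shared
    // secret, can unwind it.
    attribution_data.crypt(shared_secret);
    attribution_data
}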
channel_manager.claim_mpp_part( - part.into(), payment_preimage, None, - |_, _| - (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr) + part.into(), + payment_preimage, + None, + None, + |_, _| { + ( + Some(MonitorUpdateCompletionAction::PaymentClaimed { + payment_hash, + pending_mpp_claim, + }), + pending_claim_ptr, + ) + }, ); } processed_claims.insert(payment_claim.mpp_parts); @@ -15687,7 +16213,9 @@ where let mut receiver_node_id = Some(our_network_pubkey); let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret; if phantom_shared_secret.is_some() { - let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode) + let phantom_pubkey = channel_manager + .node_signer + .get_node_id(Recipient::PhantomNode) .expect("Failed to get node_id for phantom node recipient"); receiver_node_id = Some(phantom_pubkey) } @@ -15715,17 +16243,27 @@ where let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(channel) = peer_state.channel_by_id + if let Some(channel) = peer_state + .channel_by_id .get_mut(&previous_channel_id) .and_then(Channel::as_funded_mut) { - let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash)); - channel.claim_htlc_while_disconnected_dropping_mon_update_legacy( - claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger + let logger = WithChannelContext::from( + &channel_manager.logger, + &channel.context, + Some(payment_hash), ); + channel + .claim_htlc_while_disconnected_dropping_mon_update_legacy( + claimable_htlc.prev_hop.htlc_id, + payment_preimage, + &&logger, + ); } } - if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) { + if let Some(previous_hop_monitor) = + args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id) + { // Note that this is unsafe as we no longer require the // `ChannelMonitor`s to be re-persisted prior to this // `ChannelManager` being persisted after we get started running. @@ -15739,23 +16277,37 @@ where // for nodes during upgrade, and we explicitly require the old // persistence semantics on upgrade in the release notes. 
previous_hop_monitor.provide_payment_preimage_unsafe_legacy( - &payment_hash, &payment_preimage, &channel_manager.tx_broadcaster, - &channel_manager.fee_estimator, &channel_manager.logger + &payment_hash, + &payment_preimage, + &channel_manager.tx_broadcaster, + &channel_manager.fee_estimator, + &channel_manager.logger, ); } } let mut pending_events = channel_manager.pending_events.lock().unwrap(); - let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap()); - pending_events.push_back((events::Event::PaymentClaimed { - receiver_node_id, - payment_hash, - purpose: payment.purpose, - amount_msat: claimable_amt_msat, - htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(), - sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat), - onion_fields: payment.onion_fields, - payment_id: Some(payment_id), - }, None)); + let payment_id = + payment.inbound_payment_id(&inbound_payment_id_secret.unwrap()); + pending_events.push_back(( + events::Event::PaymentClaimed { + receiver_node_id, + payment_hash, + purpose: payment.purpose, + amount_msat: claimable_amt_msat, + htlcs: payment + .htlcs + .iter() + .map(events::ClaimedHTLC::from) + .collect(), + sender_intended_total_msat: payment + .htlcs + .first() + .map(|htlc| htlc.total_msat), + onion_fields: payment.onion_fields, + payment_id: Some(payment_id), + }, + None, + )); } } } @@ -15764,18 +16316,41 @@ where for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { + node_id: Some(counterparty_node_id), + channel_id, + }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay { + for ( + source, + preimage, + downstream_value, + downstream_closed, + downstream_node_id, + downstream_funding, + downstream_channel_id, + ) in pending_claims_to_replay + { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. - channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None, - downstream_closed, true, downstream_node_id, downstream_funding, - downstream_channel_id, None + // The same holds for attribution data. We don't have any, so we pass an empty one. 
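For contrast with the trailing `None`s passed below: when a fulfill is relayed on the live path, each intermediate hop would fold its own hold time into the attribution data it received from downstream before handing it upstream. A sketch of that per-hop step, mirroring the loop in the `test_success_hold_times` test added later in this diff (the function name is hypothetical):

fn add_our_hold_time(incoming: &mut AttributionData, shared_secret: &[u8], hold_time_ms: u32) {
    // Shift existing hold times and HMACs right (with pruning) to free position 0 for our entry.
    incoming.shift_right();
    // Write our hold time and HMAC chain, keyed by our onion shared secret.
    incoming.update(&[], shared_secret, hold_time_ms);
    // Add our encryption layer; the sender peels the layers in path order with the same secrets.
    incoming.crypt(shared_secret);
}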
+ channel_manager.claim_funds_internal( + source, + preimage, + Some(downstream_value), + None, + downstream_closed, + true, + downstream_node_id, + downstream_funding, + downstream_channel_id, + None, + None, + None, ); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 4f1f5c581df..95c240600ba 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -1785,6 +1785,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { channel_id: chan.2, htlc_id: 0, payment_preimage: our_payment_preimage, + attribution_data: None, }; nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index c9f1741cdbb..5a1e34ee33b 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -54,6 +54,8 @@ pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; #[cfg(fuzzing)] pub use onion_utils::process_onion_failure; +#[cfg(fuzzing)] +pub use onion_utils::process_onion_success; #[cfg(fuzzing)] pub use onion_utils::AttributionData; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2cf7e109eb1..e787de09347 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -793,6 +793,9 @@ pub struct UpdateFulfillHTLC { pub htlc_id: u64, /// The pre-image of the payment hash, allowing HTLC redemption pub payment_preimage: PaymentPreimage, + + /// Optional field for the attribution data that allows the sender to pinpoint the failing node under all conditions + pub attribution_data: Option, } /// A [`peer_storage`] message that can be sent to or received from a peer. @@ -3166,7 +3169,10 @@ impl_writeable_msg!(UpdateFulfillHTLC, { channel_id, htlc_id, payment_preimage -}, {}); +}, { + // Specified TLV key 1 plus 100 during testing phase. 
+ (101, attribution_data, option) +}); impl_writeable_msg!(PeerStorage, { data }, {}); @@ -5649,6 +5655,7 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, payment_preimage: PaymentPreimage([1; 32]), + attribution_data: None, }; let encoded_value = update_fulfill_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0101010101010101010101010101010101010101010101010101010101010101").unwrap(); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index c2187ecf7b6..fcc2095464f 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -880,10 +880,7 @@ fn crypt_failure_packet(shared_secret: &[u8], packet: &mut OnionErrorPacket) { chacha.process_in_place(&mut packet.data); if let Some(ref mut attribution_data) = packet.attribution_data { - let ammagext = gen_ammagext_from_shared_secret(&shared_secret); - let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); - chacha.process_in_place(&mut attribution_data.hold_times); - chacha.process_in_place(&mut attribution_data.hmacs); + attribution_data.crypt(shared_secret); } } @@ -945,10 +942,7 @@ fn update_attribution_data( let attribution_data = onion_error_packet.attribution_data.get_or_insert(AttributionData::new()); - let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); - attribution_data.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); - - attribution_data.add_hmacs(shared_secret, &onion_error_packet.data); + attribution_data.update(&onion_error_packet.data, shared_secret, hold_time); } pub(super) fn build_failure_packet( @@ -1469,6 +1463,55 @@ where } } +/// Process failure we got back from upstream on a payment we sent (implying htlc_source is an +/// OutboundRoute). +pub fn process_onion_success( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, +) -> Vec +where + L::Target: Logger, +{ + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. + let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // In the best case, paths can be up to 27 hops. But attribution data can only be conveyed back to the sender from + // the first 20 hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. + let position = attributable_hop_count - route_hop_idx - 1; + let hold_time = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + if let Some(hold_time) = hold_time { + hold_times.push(hold_time); + + log_debug!(logger, "Htlc hold time at pos {}: {} ms", route_hop_idx, hold_time); + + // Shift attribution data to prepare for processing the next hop. 
+ attribution_data.shift_left(); + } else { + log_debug!( + logger, + "Invalid HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + } + } + + hold_times +} + const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2657,6 +2700,14 @@ impl_writeable!(AttributionData, { }); impl AttributionData { + /// Encrypts or decrypts the attribution data using the provided shared secret. + pub(crate) fn crypt(&mut self, shared_secret: &[u8]) { + let ammagext = gen_ammagext_from_shared_secret(&shared_secret); + let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); + chacha.process_in_place(&mut self.hold_times); + chacha.process_in_place(&mut self.hmacs); + } + /// Adds the current node's HMACs for all possible positions to this packet. pub(crate) fn add_hmacs(&mut self, shared_secret: &[u8], message: &[u8]) { let um: [u8; 32] = gen_um_from_shared_secret(&shared_secret); @@ -2706,7 +2757,7 @@ impl AttributionData { /// Verifies the attribution data of a failure packet for the given position in the path. If the HMAC checks out, the /// reported hold time is returned. If the HMAC does not match, None is returned. - fn verify(&self, message: &Vec, shared_secret: &[u8], position: usize) -> Option { + fn verify(&self, message: &[u8], shared_secret: &[u8], position: usize) -> Option { // Calculate the expected HMAC. let um = gen_um_from_shared_secret(shared_secret); let mut hmac = HmacEngine::::new(&um); @@ -2753,7 +2804,7 @@ impl AttributionData { /// Shifts hold times and HMACS to the right, taking into account HMAC pruning. Intermediate nodes do this to create /// space for prepending their own hold time and HMACs. - fn shift_right(&mut self) { + pub(crate) fn shift_right(&mut self) { // Shift hold times right. This will free up HOLD_TIME_LEN bytes at the beginning of the array. self.hold_times.copy_within(..(MAX_HOPS - 1) * HOLD_TIME_LEN, HOLD_TIME_LEN); @@ -2791,6 +2842,12 @@ impl AttributionData { fn get_hold_time_bytes(&self, idx: usize) -> &[u8] { &self.hold_times[idx * HOLD_TIME_LEN..(idx + 1) * HOLD_TIME_LEN] } + + pub(crate) fn update(&mut self, message: &[u8], shared_secret: &[u8], hold_time: u32) { + let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); + self.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); + self.add_hmacs(shared_secret, message); + } } /// Updates the attribution data for an intermediate node. 
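On the sending side, `process_onion_success` peels the attribution data received with a fulfill hop by hop and returns each hop's self-reported hold time, stopping at the first layer whose HMAC does not verify. A hypothetical sender-side call, assuming `secp_ctx`, `logger`, the original `path` and `session_priv`, and the `attribution_data` carried by the fulfill are in scope (outside the crate the function is only re-exported under `cfg(fuzzing)`):

let hold_times =
    process_onion_success(&secp_ctx, &logger, &path, &session_priv, attribution_data);
// hold_times[i] belongs to path.hops[i]; the vector is cut short at the first hop whose HMAC
// failed to verify, so trailing hops may be missing.
for (hop, hold_time_ms) in path.hops.iter().zip(hold_times.iter()) {
    log_debug!(logger, "{} reported a hold time of {} ms", hop.pubkey, hold_time_ms);
}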
@@ -3335,6 +3392,65 @@ mod tests { process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) } + #[test] + fn test_success_hold_times() { + fn assert_data(actual: &AttributionData, expected: &str) { + let (expected_hold_times, expected_hmacs) = + expected.split_at(MAX_HOPS * HOLD_TIME_LEN * 2); + + println!( + "{}{}", + actual.hold_times.to_lower_hex_string(), + actual.hmacs.to_lower_hex_string() + ); + + assert_eq!(actual.hold_times.to_lower_hex_string(), expected_hold_times); + assert_eq!(actual.hmacs.to_lower_hex_string(), expected_hmacs); + } + + const EXPECTED_MESSAGES: [&str; 5] = [ + "d77d0711b5f71d1d1be56bd88b3bb7ebc1792bb739ea7ebc1bc3b031b8bc2df3a50e25aeb99f47d7f7ab39e24187d3f4df9c4333463b053832ee9ac07274a5261b8b2a01fc09ce9ea7cd04d7b585dfb83299fb6570d71f793c1fcac0ef498766952c8c6840efa02a567d558a3cf6822b12476324b9b9efa03e5f8f26f81fa93daac46cbf00c98e69b6747cf69caaa2a71b025bd18830c4c54cd08f598cfde6197b3f2a951aba907c964c0f5d19a44e6d1d7279637321fa598adde927b3087d238f8b426ecde500d318617cdb7a56e6ce3520fc95be41a549973764e4dc483853ecc313947709f1b5199cb077d46e701fa633e11d3e13b03e9212c115ca6fa004b2f3dd912814693b705a561a06da54cdf603677a3abecdc22c7358c2de3cef771b366a568150aeecc86ad1990bb0f4e2865933b03ea0df87901bff467908273dc6cea31cbab0e2b8d398d10b001058c259ed221b7b55762f4c7e49c8c11a45a107b7a2c605c26dc5b0b10d719b1c844670102b2b6a36c43fe4753a78a483fc39166ae28420f112d50c10ee64ca69569a2f690712905236b7c2cb7ac8954f02922d2d918c56d42649261593c47b14b324a65038c3c5be8d3c403ce0c8f19299b1664bf077d7cf1636c4fb9685a8e58b7029fd0939fa07925a60bed339b23f973293598f595e75c8f9d455d7cebe4b5e23357c8bd47d66d6628b39427e37e0aecbabf46c11be6771f7136e108a143ae9bafba0fc47a51b6c7deef4cba54bae906398ee3162a41f2191ca386b628bde7e1dd63d1611aa01a95c456df337c763cb8c3a81a6013aa633739d8cd554c688102211725e6adad165adc1bcd429d020c51b4b25d2117e8bb27eb0cc7020f9070d4ad19ac31a76ebdf5f9246646aeadbfb9a3f1d75bd8237961e786302516a1a781780e8b73f58dc06f307e58bd0eb1d8f5c9111f01312974c1dc777a6a2d3834d8a2a40014e9818d0685cb3919f6b3b788ddc640b0ff9b1854d7098c7dd6f35196e902b26709640bc87935a3914869a807e8339281e9cedaaca99474c3e7bdd35050bb998ab4546f9900904e0e39135e861ff7862049269701081ebce32e4cca992c6967ff0fd239e38233eaf614af31e186635e9439ec5884d798f9174da6ff569d68ed5c092b78bd3f880f5e88a7a8ab36789e1b57b035fb6c32a6358f51f83e4e5f46220bcad072943df8bd9541a61b7dae8f30fa3dd5fb39b1fd9a0b8e802552b78d4ec306ecee15bfe6da14b29ba6d19ce5be4dd478bca74a52429cd5309d404655c3dec85c252", + 
"1571e10db7f8aa9f8e7e99caaf9c892e106c817df1d8e3b7b0e39d1c48f631e473e17e205489dd7b3c634cac3be0825cbf01418cd46e83c24b8d9c207742db9a0f0e5bcd888086498159f08080ba7bf3ea029c0b493227c4e75a90f70340d9e21f00979fc7e4fb2078477c1a457ba242ed54b313e590b13a2a13bfeed753dab133c78059f460075b2594b4c31c50f31076f8f1a0f7ad0530d0fadaf2d86e505ff9755940ec0665f9e5bc58cad6e523091f94d0bcd3c6c65ca1a5d401128dcc5e14f9108b32e660017c13de598bcf9d403710857cccb0fb9c2a81bfd66bc4552e1132afa3119203a4aaa1e8839c1dab8cbdcde7b527aca3f54bde651aa9f3f2178829cee3f1c0b9292758a40cc63bd998fcd0d3ed4bdcaf1023267b8f8e44130a63ad15f76145936552381eabb6d684c0a3af6ba8efcf207cebaea5b7acdbb63f8e7221102409d10c23f0514dc9f4d0efb2264161a193a999a23e992632710580a0d320f676d367b9190721194514457761af05207cdab2b6328b1b3767eacb36a7ef4f7bd2e16762d13df188e0898b7410f62459458712a44bf594ae662fd89eb300abb6952ff8ad40164f2bcd7f86db5c7650b654b79046de55d51aa8061ce35f867a3e8f5bf98ad920be827101c64fb871d86e53a4b3c0455bfac5784168218aa72cbee86d9c750a9fa63c363a8b43d7bf4b2762516706a306f0aa3be1ec788b5e13f8b24837e53ac414f211e11c7a093cd9653dfa5fba4e377c79adfa5e841e2ddb6afc054fc715c05ddc6c8fc3e1ee3406e1ffceb2df77dc2f02652614d1bfcfaddebaa53ba919c7051034e2c7b7cfaabdf89f26e7f8e3f956d205dfab747ad0cb505b85b54a68439621b25832cbc2898919d0cd7c0a64cfd235388982dd4dd68240cb668f57e1d2619a656ed326f8c92357ee0d9acead3c20008bc5f04ca8059b55d77861c6d04dfc57cfba57315075acbe1451c96cf28e1e328e142890248d18f53b5d3513ce574dea7156cf596fdb3d909095ec287651f9cf1bcdc791c5938a5dd9b47e84c004d24ab3ae74492c7e8dcc1da15f65324be2672947ec82074cac8ce2b925bc555facbbf1b55d63ea6fbea6a785c97d4caf2e1dad9551b7f66c31caae5ebc7c0047e892f201308fcf452c588be0e63d89152113d87bf0dbd01603b4cdc7f0b724b0714a9851887a01f709408882e18230fe810b9fafa58a666654576d8eba3005f07221f55a6193815a672e5db56204053bc4286fa3db38250396309fd28011b5708a26a2d76c4a333b69b6bfd272fb", + 
"34e34397b8621ec2f2b54dbe6c14073e267324cd60b152bce76aec8729a6ddefb61bc263be4b57bd592aae604a32bea69afe6ef4a6b573c26b17d69381ec1fc9b5aa769d148f2f1f8b5377a73840bb6dffc324ded0d1c00dc0c99e3dbc13273b2f89510af6410b525dd8836208abbbaae12753ae2276fa0ca49950374f94e187bf65cefcdd9dd9142074edc4bd0052d0eb027cb1ab6182497f9a10f9fe800b3228e3c088dab60081c807b30a67313667ca8c9e77b38b161a037cae8e973038d0fc4a97ea215914c6c4e23baf6ac4f0fb1e7fcc8aac3f6303658dae1f91588b535eb678e2200f45383c2590a55dc181a09f2209da72f79ae6745992c803310d39f960e8ecf327aed706e4b3e2704eeb9b304dc0e0685f5dcd0389ec377bdba37610ad556a0e957a413a56339dd3c40817214bced5802beee2ee545bdd713208751added5fc0eb2bc89a5aa2decb18ee37dac39f22a33b60cc1a369d24de9f3d2d8b63c039e248806de4e36a47c7a0aed30edd30c3d62debdf1ad82bf7aedd7edec413850d91c261e12beec7ad1586a9ad25b2db62c58ca17119d61dcc4f3e5c4520c42a8e384a45d8659b338b3a08f9e123a1d3781f5fc97564ccff2c1d97f06fa0150cfa1e20eacabefb0c339ec109336d207cc63d9170752fc58314c43e6d4a528fd0975afa85f3aa186ff1b6b8cb12c97ed4ace295b0ef5f075f0217665b8bb180246b87982d10f43c9866b22878106f5214e99188781180478b07764a5e12876ddcb709e0a0a8dd42cf004c695c6fc1669a6fd0e4a1ca54b024d0d80eac492a9e5036501f36fb25b72a054189294955830e43c18e55668337c8c6733abb09fc2d4ade18d5a853a2b82f7b4d77151a64985004f1d9218f2945b63c56fdebd1e96a2a7e49fa70acb4c39873947b83c191c10e9a8f40f60f3ad5a2be47145c22ea59ed3f5f4e61cb069e875fb67142d281d784bf925cc286eacc2c43e94d08da4924b83e58dbf2e43fa625bdd620eba6d9ce960ff17d14ed1f2dbee7d08eceb540fdc75ff06dabc767267658fad8ce99e2a3236e46d2deedcb51c3c6f81589357edebac9772a70b3d910d83cd1b9ce6534a011e9fa557b891a23b5d88afcc0d9856c6dabeab25eea55e9a248182229e4927f268fe5431672fcce52f434ca3d27d1a2136bae5770bb36920df12fbc01d0e8165610efa04794f414c1417f1d4059435c5385bfe2de83ce0e238d6fd2dbd3c0487c69843298577bfa480fe2a16ab2a0e4bc712cd8b5a14871cda61c993b6835303d9043d7689a", + 
"74a4ea61339463642a2182758871b2ea724f31f531aa98d80f1c3043febca41d5ee52e8b1e127e61719a0d078db8909748d57839e58424b91f063c4fbc8a221bef261140e66a9b596ca6d420a973ad5431adfa8280a7355462fe50d4cac15cdfbd7a535c4b72a0b6d7d8a64cff3f719ff9b8be28036826342dc3bf3781efc70063d1e6fc79dff86334ae0564a5ab87bd61f8446465ef6713f8c4ef9d0200ebb375f90ee115216b469af42de554622df222858d30d733af1c9223e327ae09d9126be8baee6dd59a112d83a57cc6e0252104c11bc11705d384220eedd72f1a29a0597d97967e28b2ad13ba28b3d8a53c3613c1bb49fe9700739969ef1f795034ef9e2e983af2d3bbd6c637fb12f2f7dfc3aee85e08711e9b604106e95d7a4974e5b047674a6015792dae5d913681d84f71edd415910582e5d86590df2ecfd561dc6e1cdb08d3e10901312326a45fb0498a177319389809c6ba07a76cfad621e07b9af097730e94df92fbd311b2cb5da32c80ab5f14971b6d40f8e2ab202ac98bd8439790764a40bf309ea2205c1632610956495720030a25dc7118e0c868fdfa78c3e9ecce58215579a0581b3bafdb7dbbe53be9e904567fdc0ce1236aab5d22f1ebc18997e3ea83d362d891e04c5785fd5238326f767bce499209f8db211a50e1402160486e98e7235cf397dbb9ae19fd9b79ef589c821c6f99f28be33452405a003b33f4540fe0a41dfcc286f4d7cc10b70552ba7850869abadcd4bb7f256823face853633d6e2a999ac9fcd259c71d08e266db5d744e1909a62c0db673745ad9585949d108ab96640d2bc27fb4acac7fa8b170a30055a5ede90e004df9a44bdc29aeb4a6bec1e85dde1de6aaf01c6a5d12405d0bec22f49026cb23264f8c04b8401d3c2ab6f2e109948b6193b3bec27adfe19fb8afb8a92364d6fc5b219e8737d583e7ff3a4bcb75d53edda3bf3f52896ac36d8a877ad9f296ea6c045603fc62ac4ae41272bde85ef7c3b3fd3538aacfd5b025fefbe277c2906821ecb20e6f75ea479fa3280f9100fb0089203455c56b6bc775e5c2f0f58c63edd63fa3eec0b40da4b276d0d41da2ec0ead865a98d12bc694e23d8eaadd2b4d0ee88e9570c88fb878930f492e036d27998d593e47763927ff7eb80b188864a3846dd2238f7f95f4090ed399ae95deaeb37abca1cf37c397cc12189affb42dca46b4ff6988eb8c060691d155302d448f50ff70a794d97c0408f8cee9385d6a71fa412e36edcb22dbf433db9db4779f27b682ee17fc05e70c8e794b9f7f6d1", + 
"84986c936d26bfd3bb2d34d3ec62cfdb63e0032fdb3d9d75f3e5d456f73dffa7e35aab1db4f1bd3b98ff585caf004f656c51037a3f4e810d275f3f6aea0c8e3a125ebee5f374b6440bcb9bb2955ebf70c06d64090f9f6cf098200305f7f4305ba9e1350a0c3f7dab4ccf35b8399b9650d8e363bf83d3a0a09706433f0adae6562eb338b21ea6f21329b3775905e59187c325c9cbf589f5da5e915d9e5ad1d21aa1431f9bdc587185ed8b5d4928e697e67cc96bee6d5354e3764cede3f385588fa665310356b2b1e68f8bd30c75d395405614a40a587031ebd6ace60dfb7c6dd188b572bd8e3e9a47b06c2187b528c5ed35c32da5130a21cd881138a5fcac806858ce6c596d810a7492eb261bcc91cead1dae75075b950c2e81cecf7e5fdb2b51df005d285803201ce914dfbf3218383829a0caa8f15486dd801133f1ed7edec436730b0ec98f48732547927229ac80269fcdc5e4f4db264274e940178732b429f9f0e582c559f994a7cdfb76c93ffc39de91ff936316726cc561a6520d47b2cd487299a96322dadc463ef06127fc63902ff9cc4f265e2fbd9de3fa5e48b7b51aa0850580ef9f3b5ebb60c6c3216c5a75a93e82936113d9cad57ae4a94dd6481954a9bd1b5cff4ab29ca221fa2bf9b28a362c9661206f896fc7cec563fb80aa5eaccb26c09fa4ef7a981e63028a9c4dac12f82ccb5bea090d56bbb1a4c431e315d9a169299224a8dbd099fb67ea61dfc604edf8a18ee742550b636836bb552dabb28820221bf8546331f32b0c143c1c89310c4fa2e1e0e895ce1a1eb0f43278fdb528131a3e32bfffe0c6de9006418f5309cba773ca38b6ad8507cc59445ccc0257506ebc16a4c01d4cd97e03fcf7a2049fea0db28447858f73b8e9fe98b391b136c9dc510288630a1f0af93b26a8891b857bfe4b818af99a1e011e6dbaa53982d29cf74ae7dffef45545279f19931708ed3eede5e82280eab908e8eb80abff3f1f023ab66869297b40da8496861dc455ac3abe1efa8a6f9e2c4eda48025d43a486a3f26f269743eaa30d6f0e1f48db6287751358a41f5b07aee0f098862e3493731fe2697acce734f004907c6f11eef189424fee52cd30ad708707eaf2e441f52bcf3d0c5440c1742458653c0c8a27b5ade784d9e09c8b47f1671901a29360e7e5e94946b9c75752a1a8d599d2a3e14ac81b84d42115cd688c8383a64fc6e7e1dc5568bb4837358ebe63207a4067af66b2027ad2ce8fb7ae3a452d40723a51fdf9f9c9913e8029a222cf81d12ad41e58860d75deb6de30ad", + ]; + + let onion_keys = build_test_onion_keys(); + + let mut attribution_data = AttributionData::new(); + attribution_data.update(&[], onion_keys[4].shared_secret.as_ref(), 1); + + let logger: Arc = Arc::new(TestLogger::new()); + + attribution_data.crypt(onion_keys[4].shared_secret.as_ref()); + + assert_data(&attribution_data, EXPECTED_MESSAGES[0]); + + for idx in (0..4).rev() { + let shared_secret = onion_keys[idx].shared_secret.as_ref(); + let hold_time = (5 - idx) as u32; + + attribution_data.shift_right(); + attribution_data.update(&[], shared_secret, hold_time); + attribution_data.crypt(shared_secret); + + assert_data(&attribution_data, EXPECTED_MESSAGES[4 - idx]); + } + + let ctx_full = Secp256k1::new(); + let path = build_test_path(); + let hold_times = process_onion_success( + &ctx_full, + &logger, + &path, + &get_test_session_key(), + attribution_data.clone(), + ); + + assert_eq!(hold_times, [5, 4, 3, 2, 1]) + } + fn build_trampoline_test_path() -> Path { Path { hops: vec![ diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 9fde71ad72e..4867f09a03a 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -10,7 +10,6 @@ //! Tests that test the payment retry logic in ChannelManager, including various edge-cases around //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry //! payments thereafter. 
- use crate::chain::channelmonitor::{ ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, }; @@ -493,15 +492,15 @@ fn test_mpp_keysend() { let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); - let node_c_id = nodes[2].node.get_our_node_id(); + // let node_c_id = nodes[2].node.get_our_node_id(); let node_d_id = nodes[3].node.get_our_node_id(); create_announced_chan_between_nodes(&nodes, 0, 1); - create_announced_chan_between_nodes(&nodes, 0, 2); + // create_announced_chan_between_nodes(&nodes, 0, 2); create_announced_chan_between_nodes(&nodes, 1, 3); - create_announced_chan_between_nodes(&nodes, 2, 3); + // create_announced_chan_between_nodes(&nodes, 2, 3); - let recv_value = 15_000_000; + let recv_value = 5_000_000; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::for_keysend(node_d_id, 40, true), recv_value, @@ -514,18 +513,19 @@ fn test_mpp_keysend() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors!(nodes[0], 1); - let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); let payment_secret = Some(payment_secret); - pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, false, preimage); + pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, true, preimage); + + #[cfg(feature = "std")] + std::thread::sleep(Duration::from_millis(50)); - let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); - pass_along_path(&nodes[0], route[1], recv_value, hash, payment_secret, ev, true, preimage); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); }
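A side note on the 50 ms sleep added to `test_mpp_keysend` above: hold times are reported in plain milliseconds, so pausing before the claim presumably lets the intermediate hop accumulate a non-zero value to report. The unit itself is easy to see in a standalone toy (how LDK actually measures the interval is not shown in this diff):

use std::time::{Duration, Instant};

fn main() {
    let accepted_at = Instant::now();
    std::thread::sleep(Duration::from_millis(50));
    // The quantity a hop would report in its attribution-data slot: whole milliseconds between
    // accepting the HTLC and resolving it.
    let hold_time_ms = accepted_at.elapsed().as_millis() as u32;
    assert!(hold_time_ms >= 50);
    println!("hold time: {hold_time_ms} ms");
}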