diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index 9b511ad8d44..195274e73d0 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -31,7 +31,7 @@ use crate::prelude::hash_map::Entry; use crate::prelude::{new_hash_map, HashMap}; use crate::sync::{Arc, Mutex, MutexGuard, RwLock}; -use lightning::events::HTLCDestination; +use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{AChannelManager, InterceptId}; use lightning::ln::msgs::{ErrorAction, LightningError}; use lightning::ln::types::ChannelId; @@ -883,9 +883,9 @@ where /// /// [`Event::HTLCHandlingFailed`]: lightning::events::Event::HTLCHandlingFailed pub fn htlc_handling_failed( - &self, failed_next_destination: HTLCDestination, + &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - if let HTLCDestination::NextHopChannel { channel_id, .. } = failed_next_destination { + if let HTLCHandlingFailureType::Forward { channel_id, .. 
} = failure_type { let peer_by_channel_id = self.peer_by_channel_id.read().unwrap(); if let Some(counterparty_node_id) = peer_by_channel_id.get(&channel_id) { let outer_state_lock = self.per_peer_state.read().unwrap(); diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 8d28c9b4191..4dc7247ca41 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -26,7 +26,7 @@ use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::offers::invoice::Bolt12Invoice; use crate::offers::static_invoice::StaticInvoice; use crate::types::features::ChannelTypeFeatures; -use crate::ln::msgs; +use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; use crate::onion_message::messenger::Responder; @@ -466,12 +466,12 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason, }, ); -/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`]. +/// The type of HTLC handling performed in [`Event::HTLCHandlingFailed`]. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum HTLCDestination { +pub enum HTLCHandlingFailureType { /// We tried forwarding to a channel but failed to do so. An example of such an instance is when /// there is insufficient capacity in our outbound channel. - NextHopChannel { + Forward { /// The `node_id` of the next node. For backwards compatibility, this field is /// marked as optional, versions prior to 0.0.110 may not always be able to provide /// counterparty node information. @@ -480,12 +480,17 @@ pub enum HTLCDestination { channel_id: ChannelId, }, /// Scenario where we are unsure of the next node to forward the HTLC to. + /// + /// Deprecated: will only be used in versions before LDK v0.2.0. Downgrades will result in + /// this type being represented as [`Self::InvalidForward`]. UnknownNextHop { /// Short channel id we are requesting to forward an HTLC to. 
requested_forward_scid: u64, }, /// We couldn't forward to the outgoing scid. An example would be attempting to send a duplicate /// intercept HTLC. + /// + /// In LDK v0.2.0 and greater, this variant replaces [`Self::UnknownNextHop`]. InvalidForward { /// Short channel id we are requesting to forward an HTLC to. requested_forward_scid: u64 @@ -502,14 +507,14 @@ pub enum HTLCDestination { /// * The counterparty node modified the HTLC in transit, /// * A probing attack where an intermediary node is trying to detect if we are the ultimate /// recipient for a payment. - FailedPayment { + Receive { /// The payment hash of the payment we attempted to process. payment_hash: PaymentHash }, } -impl_writeable_tlv_based_enum_upgradable!(HTLCDestination, - (0, NextHopChannel) => { +impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType, + (0, Forward) => { (0, node_id, required), (2, channel_id, required), }, @@ -520,11 +525,36 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCDestination, (0, requested_forward_scid, required), }, (3, InvalidOnion) => {}, - (4, FailedPayment) => { + (4, Receive) => { (0, payment_hash, required), }, ); +/// The reason for HTLC failures in [`Event::HTLCHandlingFailed`]. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum HTLCHandlingFailureReason { + /// The forwarded HTLC was failed back by the downstream node with an encrypted error reason. + Downstream, + /// The HTLC was failed locally by our node. + Local { + /// The reason that our node chose to fail the HTLC. + reason: LocalHTLCFailureReason, + }, +} + +impl_writeable_tlv_based_enum!(HTLCHandlingFailureReason, + (1, Downstream) => {}, + (3, Local) => { + (0, reason, required), + }, +); + +impl From<LocalHTLCFailureReason> for HTLCHandlingFailureReason { + fn from(value: LocalHTLCFailureReason) -> Self { + HTLCHandlingFailureReason::Local { reason: value } + } +} + /// Will be used in [`Event::HTLCIntercepted`] to identify the next hop in the HTLC's path. 
/// Currently only used in serialization for the sake of maintaining compatibility. More variants /// will be added for general-purpose HTLC forward intercepts as well as trampoline forward @@ -1460,8 +1490,12 @@ pub enum Event { HTLCHandlingFailed { /// The channel over which the HTLC was received. prev_channel_id: ChannelId, - /// Destination of the HTLC that failed to be processed. - failed_next_destination: HTLCDestination, + /// The type of HTLC handling that failed. + failure_type: HTLCHandlingFailureType, + /// The reason that the HTLC failed. + /// + /// This field will be `None` only for objects serialized prior to LDK 0.2.0. + failure_reason: Option<HTLCHandlingFailureReason> }, /// Indicates that a transaction originating from LDK needs to have its fee bumped. This event /// requires confirmed external funds to be readily available to spend. @@ -1766,11 +1800,12 @@ impl Writeable for Event { (8, path.blinded_tail, option), }) }, - &Event::HTLCHandlingFailed { ref prev_channel_id, ref failed_next_destination } => { + &Event::HTLCHandlingFailed { ref prev_channel_id, ref failure_type, ref failure_reason } => { 25u8.write(writer)?; write_tlv_fields!(writer, { (0, prev_channel_id, required), - (2, failed_next_destination, required), + (1, failure_reason, option), + (2, failure_type, required), }) }, &Event::BumpTransaction(ref event)=> { @@ -2218,14 +2253,24 @@ impl MaybeReadable for Event { 25u8 => { let mut f = || { let mut prev_channel_id = ChannelId::new_zero(); - let mut failed_next_destination_opt = UpgradableRequired(None); + let mut failure_reason = None; + let mut failure_type_opt = UpgradableRequired(None); read_tlv_fields!(reader, { (0, prev_channel_id, required), - (2, failed_next_destination_opt, upgradable_required), + (1, failure_reason, option), + (2, failure_type_opt, upgradable_required), }); + + // If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade + // it to its new representation, otherwise leave unchanged. 
+ if let Some(HTLCHandlingFailureType::UnknownNextHop { requested_forward_scid }) = failure_type_opt.0 { + failure_type_opt.0 = Some(HTLCHandlingFailureType::InvalidForward { requested_forward_scid }); + failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into()); + } Ok(Some(Event::HTLCHandlingFailed { prev_channel_id, - failed_next_destination: _init_tlv_based_struct_field!(failed_next_destination_opt, upgradable_required), + failure_type: _init_tlv_based_struct_field!(failure_type_opt, upgradable_required), + failure_reason })) }; f() diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index edf811951ec..a956f2ebae2 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -11,7 +11,7 @@ use crate::blinded_path::message::{MessageContext, OffersContext}; use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; -use crate::events::{Event, HTLCDestination, PaidBolt12Invoice, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason}; use crate::ln::blinded_payment_tests::{fail_blinded_htlc_backwards, get_blinded_route_parameters}; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; @@ -172,7 +172,7 @@ fn invalid_keysend_payment_secret() { PassAlongPathArgs::new(&nodes[0], &expected_route[0], amt_msat, payment_hash, ev.clone()) .with_payment_secret(invalid_payment_secret) .with_payment_preimage(keysend_preimage) - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -701,7 +701,7 @@ fn 
amount_doesnt_match_invreq() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); // Modify the invoice request stored in our outbounds to be the correct one, to make sure the @@ -917,7 +917,7 @@ fn invalid_async_receive_with_retry( nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment { payment_hash }], + &[HTLCHandlingFailureType::Receive { payment_hash }], ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -937,7 +937,7 @@ fn invalid_async_receive_with_retry( let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], true); @@ -1103,7 +1103,7 @@ fn expired_static_invoice_payment_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(keysend_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 368b9cd199a..a358c1af273 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ 
b/lightning/src/ln/blinded_payment_tests.rs @@ -15,7 +15,7 @@ use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use crate::blinded_path; use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12RefundContext, ForwardTlvs, PaymentConstraints, PaymentContext, PaymentForwardNode, PaymentRelay, UnauthenticatedReceiveTlvs, PAYMENT_PADDING_ROUND_OFF}; use crate::blinded_path::utils::is_padded; -use crate::events::{Event, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channelmanager; @@ -425,10 +425,10 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); let failed_destination = match check { - ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, - ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, + ForwardCheckFail::InboundOnionCheck => HTLCHandlingFailureType::InvalidOnion, + ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingFailureType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, }; expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -457,9 +457,9 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { expect_pending_htlcs_forwardable!(nodes[2]); let failed_destination = match check { - ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => 
HTLCDestination::InvalidOnion, + ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCHandlingFailureType::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, }; expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -527,7 +527,7 @@ fn failed_backwards_to_intro_node() { do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -606,7 +606,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); + vec![HTLCHandlingFailureType::Forward { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards, @@ -626,7 +626,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.process_pending_htlc_forwards(); 
expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]); + vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: $failed_scid }]); $curr_node.node.process_pending_htlc_forwards(); }, } @@ -725,7 +725,7 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { if intercept_node_fails { nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(&nodes[1], 1); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1]], false); @@ -830,7 +830,7 @@ fn three_hop_blinded_path_fail() { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -958,7 +958,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ); nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[2].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[2], 1); @@ -988,7 +988,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); 
do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ReceiveRequirements => { @@ -998,7 +998,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ChannelCheck => { @@ -1014,7 +1014,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[2].node.handle_shutdown(nodes[1].node.get_our_node_id(), &node_1_shutdown); commitment_signed_dance!(nodes[2], nodes[1], (), false, true, false, false); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); }, ReceiveCheckFail::ProcessPendingHTLCsCheck => { @@ -1024,7 +1024,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); 
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2], - vec![HTLCDestination::FailedPayment { payment_hash }]); + vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); }, ReceiveCheckFail::PaymentConstraints => { @@ -1032,7 +1032,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { check_added_monitors!(nodes[2], 0); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors(&nodes[2], 1); } } @@ -1121,7 +1121,7 @@ fn blinded_path_retries() { ($intro_node: expr) => { nodes[3].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_conditions( - nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }] + nodes[3].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }] ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -1243,7 +1243,7 @@ fn min_htlc() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] ); check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1436,7 +1436,7 @@ fn fails_receive_tlvs_authentication() { expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); 
check_added_monitors!(nodes[1], 1); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); let mut update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_fail.update_fail_htlcs.len() == 1); @@ -2141,7 +2141,7 @@ fn do_test_trampoline_single_hop_receive(success: bool) { let args = PassAlongPathArgs::new(&nodes[0], route, amt_msat, payment_hash, first_message_event) .with_payment_preimage(payment_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::InvalidOnion); + .expect_failure(HTLCHandlingFailureType::InvalidOnion); do_pass_along_path(args); { @@ -2435,7 +2435,7 @@ fn test_trampoline_forward_rejection() { let args = PassAlongPathArgs::new(&nodes[0], route, amt_msat, payment_hash, first_message_event) .with_payment_preimage(payment_preimage) .without_claimable_event() - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1192a14e2b9..e24e2f25bdd 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -17,7 +17,7 @@ use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination}; +use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCHandlingFailureType}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::msgs; @@ 
-825,7 +825,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -906,7 +906,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1731,7 +1731,7 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1753,7 +1753,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -2160,7 +2160,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ 
-2512,7 +2512,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { @@ -2550,7 +2550,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_claims.0 = 1; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 363f2ffdb65..810a7aec607 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -44,7 +44,7 @@ use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock}; use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, MAX_BLOCKS_FOR_CONF, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent}; use crate::chain::transaction::{OutPoint, TransactionData}; 
-use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent}; +use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason, ReplayEvent}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use crate::ln::inbound_payment; @@ -3298,7 +3298,7 @@ macro_rules! handle_monitor_update_completion { } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) { - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } } } @@ -3924,7 +3924,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4048,7 +4048,7 @@ where let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { @@ -5733,7 +5733,7 @@ where }); let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer); - let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; + let destination = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_channel_id }; self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &reason, destination); } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted @@ -5744,20 +5744,20 @@ where let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); - let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| { + let get_htlc_failure_type = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| { if let Some(outgoing_scid) = outgoing_scid_opt { match 
self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { Some((outgoing_counterparty_node_id, outgoing_channel_id)) => - HTLCDestination::NextHopChannel { + HTLCHandlingFailureType::Forward { node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, }, - None => HTLCDestination::UnknownNextHop { + None => HTLCHandlingFailureType::InvalidForward { requested_forward_scid: outgoing_scid, }, } } else { - HTLCDestination::FailedPayment { payment_hash } + HTLCHandlingFailureType::Receive { payment_hash } } }; @@ -5787,8 +5787,9 @@ where &update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx ) { Ok(decoded_onion) => decoded_onion, - Err((htlc_fail, _)) => { - htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion)); + + Err((htlc_fail, reason)) => { + htlc_fails.push((htlc_fail, HTLCHandlingFailureType::InvalidOnion, reason.into())); continue; }, }; @@ -5815,8 +5816,8 @@ where &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, reason.into())); continue; }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. 
@@ -5832,8 +5833,8 @@ where &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, reason.into())); continue; } } @@ -5844,9 +5845,10 @@ where ) { Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), Err(inbound_err) => { - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + let htlc_failure = inbound_err.reason.into(); let htlc_fail = self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err); - htlc_fails.push((htlc_fail, htlc_destination)); + htlc_fails.push((htlc_fail, failure_type, htlc_failure)); }, } } @@ -5858,7 +5860,7 @@ where incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect() ); self.forward_htlcs_without_forward_event(&mut [pending_forwards]); - for (htlc_fail, htlc_destination) in htlc_fails.drain(..) { + for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) 
{ let failure = match htlc_fail { HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC { htlc_id: fail_htlc.htlc_id, @@ -5873,7 +5875,8 @@ where self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure); self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed { prev_channel_id: incoming_channel_id, - failed_next_destination: htlc_destination, + failure_type, + failure_reason: Some(failure_reason), }, None)); } } @@ -5929,9 +5932,9 @@ where }); let reason = if $next_hop_unknown { - HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_chan_id } } else { - HTLCDestination::FailedPayment{ payment_hash } + HTLCHandlingFailureType::Receive{ payment_hash } }; failed_forwards.push((htlc_source, payment_hash, @@ -6116,7 +6119,7 @@ where let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCHandlingFailureType::Forward { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); } else { forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); @@ -6267,7 +6270,7 @@ where cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data), - HTLCDestination::FailedPayment { payment_hash: $payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }, )); continue 'next_forwardable_htlc; } @@ -6825,7 +6828,7 @@ where let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); let failure_reason = LocalHTLCFailureReason::MPPTimeout; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::FailedPayment { 
payment_hash: htlc_source.1 }; + let receiver = HTLCHandlingFailureType::Receive { payment_hash: htlc_source.1 }; self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } @@ -6890,7 +6893,7 @@ where for htlc in payment.htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; + let receiver = HTLCHandlingFailureType::Receive { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } @@ -6969,19 +6972,19 @@ where for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id.clone()), channel_id }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } - fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) { + fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCHandlingFailureType) { let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination); if push_forward_event { self.push_pending_forwards_ev(); } } /// Fails an HTLC backwards to the sender of it to us. /// Note that we do not assume that channels corresponding to failed HTLCs are still available. 
- fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool { + fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, failure_type: HTLCHandlingFailureType) -> bool { // Ensure that no peer state channel storage lock is held when calling this function. // This ensures that future code doesn't introduce a lock-order requirement for // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling @@ -7053,7 +7056,8 @@ where let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::HTLCHandlingFailed { prev_channel_id: *channel_id, - failed_next_destination: destination, + failure_type, + failure_reason: Some(onion_error.into()), }, None)); }, } @@ -7118,7 +7122,7 @@ where for htlc in htlcs { let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let receiver = HTLCDestination::FailedPayment { payment_hash }; + let receiver = HTLCHandlingFailureType::Receive { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } return; @@ -7223,7 +7227,7 @@ where let err_data = invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height); let source = HTLCSource::PreviousHopData(htlc.prev_hop); let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, err_data); - let receiver = HTLCDestination::FailedPayment { payment_hash }; + let receiver = HTLCHandlingFailureType::Receive { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); @@ -8755,7 +8759,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } } for htlc_source in dropped_htlcs.drain(..) { - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ChannelClosed); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -9129,7 +9133,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer), - HTLCDestination::InvalidForward { requested_forward_scid: scid }, + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: scid }, )); } } @@ -9613,7 +9617,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let failure_reason = LocalHTLCFailureReason::OnChainTimeout; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } @@ -11692,7 +11696,7 @@ where let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; let data = self.get_htlc_inbound_temp_fail_data(reason); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); + HTLCHandlingFailureType::Forward { node_id: 
Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); if let Some(channel_ready) = channel_ready_opt { @@ -11814,7 +11818,7 @@ where let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)), - HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); + HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() })); false } else { true } }); @@ -11843,7 +11847,7 @@ where }; timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), - HTLCDestination::InvalidForward { requested_forward_scid })); + HTLCHandlingFailureType::InvalidForward { requested_forward_scid })); let logger = WithContext::from( &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) ); @@ -14932,7 +14936,7 @@ where for htlc_source in failed_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -14959,7 +14963,7 @@ mod tests { use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey}; use bitcoin::secp256k1::ecdh::SharedSecret; use core::sync::atomic::Ordering; - use crate::events::{Event, HTLCDestination, ClosureReason}; + use crate::events::{Event, HTLCHandlingFailureType, ClosureReason}; use crate::ln::onion_utils::AttributionData; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -15098,7 +15102,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15279,7 +15283,7 @@ mod tests { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15324,7 +15328,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15371,7 +15375,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -15428,7 +15432,7 @@ mod tests { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + 
expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 45336543483..68acb44f72e 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -13,7 +13,7 @@ use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; -use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, PaidBolt12Invoice, PathFailure, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -1969,8 +1969,8 @@ macro_rules! expect_htlc_handling_failed_destinations { for event in $events { match event { $crate::events::Event::PendingHTLCsForwardable { .. } => { }, - $crate::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { - assert!($expected_failures.contains(&failed_next_destination)); + $crate::events::Event::HTLCHandlingFailed { ref failure_type, .. } => { + assert!($expected_failures.contains(&failure_type)); num_expected_failures -= 1; }, _ => panic!("Unexpected destination"), @@ -1981,9 +1981,9 @@ macro_rules! 
expect_htlc_handling_failed_destinations { } /// Checks that an [`Event::PendingHTLCsForwardable`] is available in the given events and, if -/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCDestination`] is included in the +/// there are any [`Event::HTLCHandlingFailed`] events their [`HTLCHandlingFailureType`] is included in the /// `expected_failures` set. -pub fn expect_pending_htlcs_forwardable_conditions(events: Vec<Event>, expected_failures: &[HTLCDestination]) { +pub fn expect_pending_htlcs_forwardable_conditions(events: Vec<Event>, expected_failures: &[HTLCHandlingFailureType]) { let count = expected_failures.len() + 1; assert_eq!(events.len(), count); assert!(events.iter().find(|event| matches!(event, Event::PendingHTLCsForwardable { .. })).is_some()); @@ -2153,7 +2153,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' if fail_backwards { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, - vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id }]); + vec![crate::events::HTLCHandlingFailureType::Forward{ node_id: Some(node_b.node.get_our_node_id()), channel_id }]); check_added_monitors!(node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); @@ -2686,7 +2686,7 @@ pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> { pub is_probe: bool, pub custom_tlvs: Vec<(u64, Vec<u8>)>, pub payment_metadata: Option<Vec<u8>>, - pub expected_failure: Option<HTLCDestination>, + pub expected_failure: Option<HTLCHandlingFailureType>, } impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { @@ -2729,7 +2729,7 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { self.payment_metadata = Some(payment_metadata); self } - pub fn expect_failure(mut self, failure: HTLCDestination) -> Self { + pub fn expect_failure(mut self, failure: HTLCHandlingFailureType) -> Self { self.payment_claimable_expected = false; self.expected_failure = Some(failure); self @@ -2868,7 +2868,7 @@ pub fn 
send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expect fail_payment_along_path(nodes_to_fail_payment.as_slice()); expect_htlc_handling_failed_destinations!( path.last().unwrap().node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment { payment_hash: *payment_hash }] + &[HTLCHandlingFailureType::Receive { payment_hash: *payment_hash }] ); } } @@ -3182,7 +3182,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); - let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); + let expected_destinations: Vec<HTLCHandlingFailureType> = repeat(HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), expected_destinations); pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash, PaymentFailureReason::RecipientRejected); @@ -3225,7 +3225,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCHandlingFailureType::Forward { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); } } let events = 
node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 36764d4d375..f58229a6d71 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; -use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; @@ -1278,7 +1278,7 @@ pub fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. 
expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1605,7 +1605,7 @@ pub fn test_fee_spike_violation_fails_htlc() { }; nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2390,7 +2390,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac // Check that nodes[1] fails the HTLC upstream expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -2412,7 +2412,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[1], ANTI_REORG_DELAY); // Expect handling another fail back event, but the HTLC is already gone expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); @@ -2438,7 +2438,7 @@ fn 
do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac PostFailBackAction::FailOffChain => { nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], - vec![HTLCDestination::FailedPayment { payment_hash }]); + vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); let update_fail = commitment_update.update_fail_htlcs[0].clone(); @@ -3475,7 +3475,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -3530,7 +3530,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3596,7 +3596,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3660,7 +3660,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3673,7 +3673,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3690,7 +3690,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[2], 1); 
nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }]); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -4803,7 +4803,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -4867,7 +4867,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5536,7 +5536,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine the HTLC timeout transaction on node B. 
mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5702,10 +5702,10 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[4], 0); let failed_destinations = vec![ - HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_3 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_5 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_6 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_3 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_5 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_6 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors!(nodes[4], 1); @@ -5723,8 +5723,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_added_monitors!(nodes[5], 0); let failed_destinations_2 = vec![ - HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }, - HTLCDestination::FailedPayment { payment_hash: payment_hash_4 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }, + HTLCHandlingFailureType::Receive { payment_hash: payment_hash_4 }, ]; 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors!(nodes[5], 1); @@ -5738,12 +5738,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events let failed_destinations_3 = vec![ - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); check_added_monitors!(nodes[3], 1); @@ -5796,13 +5796,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { 
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); + let expected_destinations: Vec = repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); } else { - let expected_destinations: Vec = if announce_latest { - repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() + let expected_destinations: Vec = if announce_latest { + repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() } else { - repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() + repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); @@ -6173,7 +6173,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7270,7 +7270,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -7337,7 +7337,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -7360,7 +7360,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7411,7 +7411,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { check_added_monitors!(nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -7429,7 +7429,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { } expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7493,7 +7493,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], 
vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7757,7 +7757,7 @@ pub fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. @@ -8038,7 +8038,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); + expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} _ => panic!("Unexpected event"), @@ -8319,7 +8319,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -8962,7 +8962,7 @@ pub fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive{ payment_hash: $payment_hash }]); check_added_monitors!(nodes[1], 1); // We should fail the payment back @@ -10091,7 +10091,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -10114,7 +10114,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]); + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }]); } } @@ -10175,8 +10175,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[1].node.fail_htlc_backwards(&our_payment_hash); let expected_destinations = vec![ - HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, - HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations); nodes[1].node.process_pending_htlc_forwards(); @@ -10197,7 +10197,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { if let Event::PaymentFailed { .. 
} = failure_events[3] {} else { panic!(); } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -10305,7 +10305,7 @@ pub fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -10314,7 +10314,7 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -10371,8 +10371,8 @@ pub fn test_double_partial_claim() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later let failed_destinations = vec![ - 
HTLCDestination::FailedPayment { payment_hash }, - HTLCDestination::FailedPayment { payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); @@ -10581,7 +10581,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats @@ -10742,7 +10742,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1); @@ -10795,7 +10795,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let node_id_1 = nodes[1].node.get_our_node_id(); expect_htlc_handling_failed_destinations!( 
nodes[0].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }] + &[HTLCHandlingFailureType::Forward { node_id: Some(node_id_1), channel_id: chan_id_1 }] ); let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); @@ -10901,7 +10901,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1); @@ -11194,7 +11194,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); } else { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index e70e2cfa99b..38e43416866 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -14,7 +14,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATEN use crate::chain::transaction::OutPoint; use 
crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; -use crate::events::{Event, ClosureReason, HTLCDestination}; +use crate::events::{Event, ClosureReason, HTLCHandlingFailureType}; use crate::ln::channel; use crate::ln::types::ChannelId; use crate::ln::chan_utils; @@ -86,7 +86,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1217,7 +1217,7 @@ fn test_no_preimage_inbound_htlc_balances() { assert_eq!(as_htlc_timeout_claim.len(), 1); check_spends!(as_htlc_timeout_claim[0], as_txn[0]); expect_pending_htlcs_forwardable_conditions!(nodes[0], - [HTLCDestination::FailedPayment { payment_hash: to_a_failed_payment_hash }]); + [HTLCHandlingFailureType::Receive { payment_hash: to_a_failed_payment_hash }]); assert_eq!(as_pre_spend_claims, sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(chan_id).unwrap().get_claimable_balances())); @@ -1235,7 +1235,7 @@ fn test_no_preimage_inbound_htlc_balances() { nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); connect_blocks(&nodes[1], TEST_FINAL_CLTV - (ANTI_REORG_DELAY - 1)); expect_pending_htlcs_forwardable_conditions!(nodes[1], - [HTLCDestination::FailedPayment { payment_hash: to_b_failed_payment_hash }]); + [HTLCHandlingFailureType::Receive { payment_hash: 
to_b_failed_payment_hash }]); let bs_htlc_timeout_claim = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_htlc_timeout_claim.len(), 1); check_spends!(bs_htlc_timeout_claim[0], as_txn[0]); @@ -1417,12 +1417,12 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ .iter().map(|a| *a).collect(); events.retain(|ev| { match ev { - Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { node_id, channel_id }, .. } => { + Event::HTLCHandlingFailed { failure_type: HTLCHandlingFailureType::Forward { node_id, channel_id }, .. } => { assert_eq!(*channel_id, chan_id); assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id())); false }, - Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::FailedPayment { payment_hash }, .. } => { + Event::HTLCHandlingFailed { failure_type: HTLCHandlingFailureType::Receive { payment_hash }, .. } => { assert!(failed_payments.remove(payment_hash)); false }, @@ -1737,7 +1737,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { // pinnable claims, which the remainder of the test assumes. 
connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); + [HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 78cd40e9e34..c6d185b3c2e 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,7 +47,7 @@ use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; use crate::blinded_path::message::OffersContext; -use crate::events::{ClosureReason, Event, HTLCDestination, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; use crate::ln::channelmanager::{Bolt12PaymentError, MAX_SHORT_LIVED_RELATIVE_EXPIRY, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; use crate::types::features::Bolt12InvoiceFeatures; use crate::ln::functional_test_utils::*; @@ -2308,7 +2308,7 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCDestination::FailedPayment { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); do_pass_along_path(args); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git 
a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 5e17afe62d6..fab6d941a7d 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -13,7 +13,7 @@ use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::{EntropySource, NodeSigner, Recipient}; -use crate::events::{Event, HTLCDestination, PathFailure, PaymentFailureReason}; +use crate::events::{Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason}; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields}; @@ -52,11 +52,11 @@ use crate::ln::onion_utils::{construct_trampoline_onion_keys, construct_trampoli use super::msgs::OnionErrorPacket; use super::onion_utils::AttributionData; -fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) +fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_failure_type: Option) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: FnMut(), { - run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, 
expected_short_channel_id, expected_htlc_destination); + run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, payment_secret, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update, expected_short_channel_id, expected_failure_type); } // test_case @@ -71,7 +71,7 @@ fn run_onion_failure_test_with_fail_intercept( payment_secret: &PaymentSecret, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_reason: Option, expected_channel_update: Option, expected_short_channel_id: Option, - expected_htlc_destination: Option, + expected_failure_type: Option, ) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC), @@ -114,7 +114,7 @@ fn run_onion_failure_test_with_fail_intercept( let update_1_0 = match test_case { 0|100 => { // intermediate node failure; fail backward to 0 expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_htlc_destination.clone().unwrap()]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[expected_failure_type.clone().unwrap()]); check_added_monitors(&nodes[1], 1); let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1)); @@ -145,10 +145,10 @@ fn run_onion_failure_test_with_fail_intercept( expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentClaimable); callback_node(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], 
vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); } else if test_case == 1 || test_case == 3 { expect_htlc_forward!(&nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_htlc_destination.clone().unwrap()]); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), vec![expected_failure_type.clone().unwrap()]); } check_added_monitors!(&nodes[2], 1); @@ -314,7 +314,7 @@ fn test_fee_failures() { run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); + Some(HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. @@ -360,7 +360,7 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -379,7 +379,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. 
new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -398,7 +398,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -480,13 +480,13 @@ fn test_onion_failure() { // the UpdateAddHTLC that we sent. 
let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -549,7 +549,7 @@ fn test_onion_failure() { bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::UnknownNextPeer), - Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); + 
Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) @@ -605,7 +605,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); + }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCHandlingFailureType::Receive { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -618,7 +618,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCHandlingFailureType::Receive { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -632,7 +632,7 @@ fn test_onion_failure() { } } } - }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), 
Some(HTLCHandlingFailureType::Receive { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -927,7 +927,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_reason), Some(network_update), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), + Some(HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -1383,7 +1383,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, payment_amount); nodes[1].node.fail_htlc_backwards_with_reason(&payment_hash, failure_code); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1518,7 +1518,7 @@ fn test_phantom_onion_hmac_failure() { } }; nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1595,7 +1595,7 @@ fn 
test_phantom_invalid_onion_payload() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1653,7 +1653,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { } } nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1700,7 +1700,7 @@ fn test_phantom_failure_too_low_cltv() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1751,7 +1751,7 @@ fn test_phantom_failure_modified_cltv() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingFailureType::InvalidForward { 
requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1800,7 +1800,7 @@ fn test_phantom_failure_expires_too_soon() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1847,7 +1847,7 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1905,7 +1905,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: phantom_scid }] + &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: phantom_scid }] ); check_added_monitors(&nodes[1], 1); @@ -1954,7 +1954,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_amt_msat, None, route.paths[0].hops.last().unwrap().pubkey); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], 
vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index f4cd412cc1d..17c48e03733 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -11,6 +11,7 @@ use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; use crate::crypto::streams::ChaChaReader; +use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; use crate::ln::msgs; @@ -1761,6 +1762,17 @@ impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, (85, PeerOffline) => {}, ); +impl From<&HTLCFailReason> for HTLCHandlingFailureReason { + fn from(value: &HTLCFailReason) -> Self { + match value.0 { + HTLCFailReasonRepr::LightningError { .. } => HTLCHandlingFailureReason::Downstream, + HTLCFailReasonRepr::Reason { failure_reason, .. 
} => { + HTLCHandlingFailureReason::Local { reason: failure_reason } + }, + } + } +} + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(PartialEq))] pub(super) struct HTLCFailReason(HTLCFailReasonRepr); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index c62b80d5653..7934f060df1 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -14,7 +14,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCDestination, PathFailure, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; @@ -128,7 +128,7 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. 
expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -233,7 +233,7 @@ fn mpp_retry_overpay() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }] ); @@ -329,7 +329,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); @@ -337,7 +337,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), 
channel_id: chan_3_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -565,14 +565,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -663,7 +663,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: 
Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] ); check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected @@ -914,14 +914,14 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional // incoming HTLCs with the same payment hash later. nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). 
For dust HTLCs, the HTLC is considered resolved @@ -1199,7 +1199,7 @@ fn test_fulfill_restart_failure() { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); @@ -1657,7 +1657,7 @@ fn abandoned_send_payment_idempotent() { check_send_rejected!(); nodes[1].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. @@ -1939,7 +1939,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. 
nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -2015,7 +2015,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::InvalidForward { requested_forward_scid: intercept_scid }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); @@ -2231,7 +2231,7 @@ fn do_automatic_retries(test: AutoRetry) { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: $failing_channel_id, }]); @@ -2715,7 +2715,7 @@ fn fails_paying_after_rejected_by_payee() { expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash }]); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3022,7 +3022,7 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); @@ -3213,7 +3213,7 @@ fn test_simple_partial_retry() { commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 2); @@ -3406,7 +3406,7 @@ fn test_threaded_payment_retries() { nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::UnknownNextHop { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] ); check_added_monitors(&nodes[1], 1); @@ -3621,7 +3621,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { if fail_payment { // We fail the 
HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead // and expire both immediately, though, by connecting another 4 blocks. - let reason = HTLCDestination::FailedPayment { payment_hash }; + let reason = HTLCHandlingFailureType::Receive { payment_hash }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); @@ -3768,7 +3768,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { }, (false, true) => { nodes[1].node.claim_funds(our_payment_preimage); - let expected_destinations = vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], expected_destinations); pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, our_payment_hash, PaymentFailureReason::RecipientRejected); } @@ -3816,7 +3816,7 @@ fn test_retry_custom_tlvs() { // Attempt to forward the payment and complete the path's failure. 
expect_pending_htlcs_forwardable!(&nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], - vec![HTLCDestination::NextHopChannel { + vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2_id }]); @@ -3994,7 +3994,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true); } else { // Expect fail back - let expected_destinations = vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]; + let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); @@ -4003,7 +4003,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ - HTLCDestination::NextHopChannel { + HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); @@ -4089,7 +4089,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] ); check_added_monitors(&nodes[2], 1); @@ -4147,7 +4147,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); 
expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(), - &[HTLCDestination::FailedPayment {payment_hash}]); + &[HTLCHandlingFailureType::Receive {payment_hash}]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); @@ -4156,7 +4156,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4237,7 +4237,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); @@ -4404,7 +4404,7 @@ fn test_non_strict_forwarding() { }; // The failure to forward will refer to the channel given in the onion. 
expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index ed707771f84..6c46e4b5441 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -12,7 +12,7 @@ //! LSP). use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{ClosureReason, Event, HTLCDestination}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields}; use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::RoutingFees; @@ -75,7 +75,7 @@ fn test_priv_forwarding_rejection() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] ); check_added_monitors(&nodes[1], 1); @@ -445,7 +445,7 @@ fn test_inbound_scid_privacy() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: 
last_hop[0].channel_id }] ); check_added_monitors(&nodes[1], 1); @@ -504,7 +504,7 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -530,7 +530,7 @@ fn test_scid_alias_returned() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] ); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/quiescence_tests.rs b/lightning/src/ln/quiescence_tests.rs index d35fe5a33be..c0b82799c5c 100644 --- a/lightning/src/ln/quiescence_tests.rs +++ b/lightning/src/ln/quiescence_tests.rs @@ -1,5 +1,5 @@ use crate::chain::ChannelMonitorUpdateStatus; -use crate::events::{Event, HTLCDestination}; +use crate::events::{Event, HTLCHandlingFailureType}; use crate::ln::channel::DISCONNECT_PEER_AWAITING_RESPONSE_TICKS; use crate::ln::channelmanager::PaymentId; use crate::ln::channelmanager::RecipientOnionFields; @@ -144,7 +144,7 @@ fn allow_shutdown_while_awaiting_quiescence(local_shutdown: bool) { expect_pending_htlcs_forwardable!(remote_node); expect_htlc_handling_failed_destinations!( remote_node.node.get_and_clear_pending_events(), - 
&[HTLCDestination::FailedPayment { payment_hash }] + &[HTLCHandlingFailureType::Receive { payment_hash }] ); check_added_monitors(remote_node, 1); @@ -342,7 +342,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // `stfu`, the `update_fail/fulfill` will go into the holding cell. if fail_htlc { nodes[1].node.fail_htlc_backwards(&payment_hash2); - let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash2 }; + let failed_payment = HTLCHandlingFailureType::Receive { payment_hash: payment_hash2 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![failed_payment]); } else { nodes[1].node.claim_funds(payment_preimage2); @@ -392,7 +392,7 @@ fn quiescence_updates_go_to_holding_cell(fail_htlc: bool) { // Have nodes[0] fail/claim nodes[1]'s payment. if fail_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash1); - let failed_payment = HTLCDestination::FailedPayment { payment_hash: payment_hash1 }; + let failed_payment = HTLCHandlingFailureType::Receive { payment_hash: payment_hash1 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[0], vec![failed_payment]); } else { nodes[0].node.claim_funds(payment_preimage1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a2d567dba96..78243d8db39 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -15,7 +15,7 @@ use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; -use crate::events::{ClosureReason, Event, HTLCDestination}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, PaymentId, RecipientOnionFields, RAACommitmentOrder}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -1112,7 +1112,7 @@ fn 
do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } if !claim_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } @@ -1178,7 +1178,7 @@ fn removed_payment_no_manager_persistence() { let node_encoded = nodes[1].node.encode(); nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1210,7 +1210,7 @@ fn removed_payment_no_manager_persistence() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1324,7 +1324,7 @@ fn test_htlc_localremoved_persistence() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], 
&updates.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash: mismatch_payment_hash }]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]); check_added_monitors(&nodes[1], 1); // Save the update_fail_htlc message for later comparison. diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 7c6ac7dffa0..589d7a29803 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -13,7 +13,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; -use crate::events::{Event, ClosureReason, HTLCDestination}; +use crate::events::{Event, ClosureReason, HTLCHandlingFailureType}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; @@ -130,7 +130,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { } else { // Confirm the timeout tx and check that we fail the HTLC backwards connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new())); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index ae4f73231e6..fb70860b38c 
100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -13,7 +13,7 @@ use crate::sign::{EntropySource, SignerProvider}; use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; -use crate::events::{Event, HTLCDestination, ClosureReason}; +use crate::events::{Event, HTLCHandlingFailureType, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; @@ -468,7 +468,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] + &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] ); check_added_monitors(&nodes[1], 1); @@ -1336,7 +1336,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) { if use_htlc { nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap()); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[0], - [HTLCDestination::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]); + [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_opt.unwrap() }]); } else { *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() *= 10; nodes[0].node.timer_tick_occurred(); diff --git a/pending_changelog/3700-reason-in-handling-failed.txt b/pending_changelog/3700-reason-in-handling-failed.txt new file mode 100644 index 00000000000..5a8643554df --- /dev/null +++ b/pending_changelog/3700-reason-in-handling-failed.txt @@ -0,0 +1,8 @@ +## API Updates (0.2) + +* The `HTLCHandlingFailed` event was updated to include a `failure_reason` field that provides + additional information about why the 
HTLC was failed. +* The `failed_next_destination` field, which previously contained a combination of failure + and HTLC-related information, was renamed to `failure_type` and the `UnknownNextHop` + variant was deprecated. This variant will be represented as `InvalidForward` for nodes + downgrading from v0.2.0.