From 56834a190fd47cab0cc2cd4d8ca9ffe698ddbf8b Mon Sep 17 00:00:00 2001
From: Fuyin
Date: Fri, 11 Jul 2025 01:13:32 +0800
Subject: [PATCH] Prune locktimed packages when inputs are spent

We have to prune locktimed packages when their inputs are spent;
otherwise, the notification of the watched outputs might be missed. This
can lead to locktimed packages with spent inputs being added back to the
pending claim requests in the future, where they are never cleaned up
until the node restarts.

Resolves: #3859
---
 lightning/src/chain/onchaintx.rs     |  21 ++++
 lightning/src/ln/functional_tests.rs |  54 +++------
 lightning/src/ln/monitor_tests.rs    |  15 +--
 lightning/src/ln/reorg_tests.rs      | 162 +++++++++++++++++++++++++++
 4 files changed, 201 insertions(+), 51 deletions(-)

diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index fd0f0d9abf5..8b795e2581c 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -269,6 +269,9 @@ pub struct OnchainTxHandler<ChannelSigner: EcdsaChannelSigner> {
 	#[cfg(not(any(test, feature = "_test_utils")))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,
 
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
 
 	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
@@ -994,6 +997,17 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 					panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
 				}
 			}
+
+			// Also remove/split any locktimed packages whose inputs have been spent by this transaction.
+			self.locktimed_packages.retain(|_locktime, packages| {
+				packages.retain_mut(|package| {
+					if let Some(p) = package.split_package(&inp.previous_output) {
+						claimed_outputs_material.push(p);
+					}
+					!package.outpoints().is_empty()
+				});
+				!packages.is_empty()
+			});
 		}
 		for package in claimed_outputs_material.drain(..) {
 			let entry = OnchainEventEntry {
@@ -1135,6 +1149,13 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 				//- resurect outpoint back in its claimable set and regenerate tx
 				match entry.event {
 					OnchainEvent::ContentiousOutpoint { package } => {
+						// We pass 0 as the current height to `package_locktime` to get the package's actual required locktime.
+						let package_locktime = package.package_locktime(0);
+						if package_locktime >= height {
+							self.locktimed_packages.entry(package_locktime).or_default().push(package);
+							continue;
+						}
+
 						if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) {
 							if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) {
 								assert!(request.merge_package(package, height).is_ok());
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 60aa21bc44e..e363089ed8b 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -1961,45 +1961,9 @@ pub fn test_htlc_on_chain_success() {
 		_ => panic!("Unexpected event"),
 	}
 
-	macro_rules! check_tx_local_broadcast {
-		($node: expr, $htlc_offered: expr, $commitment_tx: expr) => {{
-			let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
-			// HTLC timeout claims for non-anchor channels are only aggregated when claimed from the
-			// remote commitment transaction.
-			if $htlc_offered {
-				assert_eq!(node_txn.len(), 2);
-				for tx in node_txn.iter() {
-					check_spends!(tx, $commitment_tx);
-					assert_ne!(tx.lock_time, LockTime::ZERO);
-					assert_eq!(
-						tx.input[0].witness.last().unwrap().len(),
-						OFFERED_HTLC_SCRIPT_WEIGHT
-					);
-					assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output
-				}
-				assert_ne!(
-					node_txn[0].input[0].previous_output,
-					node_txn[1].input[0].previous_output
-				);
-			} else {
-				assert_eq!(node_txn.len(), 1);
-				check_spends!(node_txn[0], $commitment_tx);
-				assert_ne!(node_txn[0].lock_time, LockTime::ZERO);
-				assert_eq!(
-					node_txn[0].input[0].witness.last().unwrap().len(),
-					ACCEPTED_HTLC_SCRIPT_WEIGHT
-				);
-				assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment
-				assert_ne!(
-					node_txn[0].input[0].previous_output,
-					node_txn[0].input[1].previous_output
-				);
-			}
-			node_txn.clear();
-		}};
-	}
-	// nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success.
-	check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]);
+	// nodes[1] does not broadcast its own timeout-claim of the output as nodes[2] just claimed it
+	// via success.
+	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 
 	// Broadcast legit commitment tx from A on B's chain
 	// Broadcast preimage tx by B on offered output from A commitment tx on A's chain
@@ -2061,7 +2025,17 @@ pub fn test_htlc_on_chain_success() {
 			_ => panic!("Unexpected event"),
 		}
 	}
-	check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]);
+	// HTLC timeout claims for non-anchor channels are only aggregated when claimed from the
+	// remote commitment transaction.
+	let mut node_txn = nodes[0].tx_broadcaster.txn_broadcast();
+	assert_eq!(node_txn.len(), 2);
+	for tx in node_txn.iter() {
+		check_spends!(tx, node_a_commitment_tx[0]);
+		assert_ne!(tx.lock_time, LockTime::ZERO);
+		assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+		assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output
+	}
+	assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output);
 }
 
 fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index cdcb570af68..5382fba2689 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -732,8 +732,9 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
 	test_spendable_output(&nodes[0], &remote_txn[0], false);
 	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
 
-	// After broadcasting the HTLC claim transaction, node A will still consider the HTLC
-	// possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it.
+	// After confirming the HTLC claim transaction, node A will no longer attempt to claim said
+	// HTLC, unless the transaction is reorged. However, we'll still report a
+	// `MaybeTimeoutClaimableHTLC` balance for it until we reach `ANTI_REORG_DELAY` confirmations.
 	mine_transaction(&nodes[0], &b_broadcast_txn[0]);
 	if prev_commitment_tx {
 		expect_payment_path_successful!(nodes[0]);
@@ -749,18 +750,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
 	// When the HTLC timeout output is spendable in the next block, A should broadcast it
 	connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1);
 	let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	// Aggregated claim transaction.
 	assert_eq!(a_broadcast_txn.len(), 1);
 	check_spends!(a_broadcast_txn[0], remote_txn[0]);
-	assert_eq!(a_broadcast_txn[0].input.len(), 2);
-	assert_ne!(a_broadcast_txn[0].input[0].previous_output.vout, a_broadcast_txn[0].input[1].previous_output.vout);
-	// a_broadcast_txn [0] and [1] should spend the HTLC outputs of the commitment tx
-	assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 3_000));
+	assert_eq!(a_broadcast_txn[0].input.len(), 1);
 	assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 4_000));
-
-	// Confirm node B's claim for node A to remove that claim from the aggregated claim transaction.
-	mine_transaction(&nodes[0], &b_broadcast_txn[0]);
-	let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	let a_htlc_timeout_tx = a_broadcast_txn.into_iter().next_back().unwrap();
 
 	// Once the HTLC-Timeout transaction confirms, A will no longer consider the HTLC
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 4d01956e60a..39fc0b6c725 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -899,3 +899,165 @@ fn test_retries_own_commitment_broadcast_after_reorg() {
 	do_test_retries_own_commitment_broadcast_after_reorg(true, false);
 	do_test_retries_own_commitment_broadcast_after_reorg(true, true);
 }
+
+#[test]
+pub fn test_pruned_locktimed_packages_recovery_after_reorg() {
+	use crate::events::bump_transaction::sync::WalletSourceSync;
+	use bitcoin::{Amount, Transaction, TxIn, TxOut};
+	use bitcoin::locktime::absolute::LockTime;
+	use bitcoin::transaction::Version;
+
+	// ====== TEST SETUP ======
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+
+	let mut user_cfg = test_default_channel_config();
+	user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+	user_cfg.manually_accept_inbound_channels = true;
+
+	let configs = [Some(user_cfg.clone()), Some(user_cfg.clone())];
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &configs);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	// Since we're using anchor channels, make sure each node has a UTXO for paying fees.
+	let coinbase_tx = Transaction {
+		version: Version::TWO,
+		lock_time: LockTime::ZERO,
+		input: vec![TxIn { ..Default::default() }],
+		output: vec![
+			TxOut {
+				value: Amount::ONE_BTC,
+				script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
+			},
+			TxOut {
+				value: Amount::ONE_BTC,
+				script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(),
+			},
+		],
+	};
+	nodes[0].wallet_source.add_utxo(
+		bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 },
+		coinbase_tx.output[0].value,
+	);
+	nodes[1].wallet_source.add_utxo(
+		bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 },
+		coinbase_tx.output[1].value,
+	);
+
+	const CHAN_CAPACITY: u64 = 10_000_000;
+	let (_, _, channel_id, funding_tx) =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_CAPACITY, 0);
+
+	// Ensure all nodes are at the same initial height.
+	let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap();
+	for node in &nodes {
+		let blocks_to_mine = node_max_height - node.best_block_info().1;
+		if blocks_to_mine > 0 {
+			connect_blocks(node, blocks_to_mine);
+		}
+	}
+
+	// ====== TEST PROCESS ======
+
+	// Route HTLC 1 from A to B.
+	let (preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	// Node B claims HTLC 1.
+	nodes[1].node.claim_funds(preimage_1);
+	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+	check_added_monitors(&nodes[1], 1);
+	let _ = get_htlc_update_msgs(&nodes[1], &node_a_id);
+
+	// Force close the channel by broadcasting node B's commitment tx.
+	let node_b_commit_tx = get_local_commitment_txn!(nodes[1], channel_id);
+	assert_eq!(node_b_commit_tx.len(), 1);
+	let node_b_commit_tx = &node_b_commit_tx[0];
+	check_spends!(node_b_commit_tx, funding_tx);
+
+	let htlc_1_locktime = nodes[0].best_block_info().1 + 1 + TEST_FINAL_CLTV;
+	mine_transaction(&nodes[0], node_b_commit_tx);
+	check_closed_event(
+		&nodes[0],
+		1,
+		ClosureReason::CommitmentTxConfirmed,
+		false,
+		&[node_b_id],
+		CHAN_CAPACITY,
+	);
+	check_closed_broadcast!(nodes[0], true);
+	check_added_monitors(&nodes[0], 1);
+
+	mine_transaction(&nodes[1], node_b_commit_tx);
+	check_closed_event(
+		&nodes[1],
+		1,
+		ClosureReason::CommitmentTxConfirmed,
+		false,
+		&[node_a_id],
+		CHAN_CAPACITY,
+	);
+	check_closed_broadcast!(nodes[1], true);
+	check_added_monitors(&nodes[1], 1);
+
+	// Node B generates HTLC 1 claim tx.
+	let process_bump_event = |node: &Node| {
+		let events = node.chain_monitor.chain_monitor.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		let bump_event = match &events[0] {
+			Event::BumpTransaction(bump_event) => bump_event,
+			e => panic!("Unexpected event: {:#?}", e),
+		};
+		node.bump_tx_handler.handle_event(bump_event);
+
+		let mut tx = node.tx_broadcaster.txn_broadcast();
+		assert_eq!(tx.len(), 1);
+		tx.pop().unwrap()
+	};
+	let bs_htlc_1_claim_tx = process_bump_event(&nodes[1]);
+
+	let get_locktimed_packages = || {
+		let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(channel_id).unwrap();
+		let onchain_tx_handler = &monitor.inner.lock().unwrap().onchain_tx_handler;
+		onchain_tx_handler.locktimed_packages.clone()
+	};
+
+	let locktimed_packages = get_locktimed_packages();
+	let htlc_1_locktimed_package = {
+		let packages = locktimed_packages.get(&htlc_1_locktime)
+			.expect("HTLC 1 locktimed package should exist");
+		assert_eq!(packages.len(), 1, "HTLC 1 locktimed package should have only one package");
+		packages.first().unwrap().clone()
+	};
+
+	// HTLC 1 claim tx confirmed - node A should prune its claim request from the locktimed HTLC packages.
+	mine_transaction(&nodes[0], &bs_htlc_1_claim_tx);
+	let locktimed_packages = get_locktimed_packages();
+	assert!(locktimed_packages.is_empty(), "locktimed packages should be pruned");
+
+	// Disconnect the block containing HTLC 1 claim tx to simulate a reorg. Node A should recover
+	// the pruned locktimed package.
+	disconnect_blocks(&nodes[0], 1);
+	let locktimed_packages = get_locktimed_packages();
+	let recovered_htlc_1_locktimed_package = {
+		let packages = locktimed_packages.get(&htlc_1_locktime)
+			.expect("HTLC 1 locktimed package should be recovered");
+		assert_eq!(packages.len(), 1, "HTLC 1 locktimed package should have only one package");
+		packages.first().unwrap().clone()
+	};
+	assert!(recovered_htlc_1_locktimed_package == htlc_1_locktimed_package,
+		"Recovered HTLC 1 locktimed package should match the original one");
+
+	// HTLC 1 locktime expires.
+	connect_blocks(&nodes[0], TEST_FINAL_CLTV);
+	// HTLC 1 timeout tx should be broadcast.
+	let mut txs = nodes[0].tx_broadcaster.txn_broadcast();
+	assert_eq!(txs.len(), 1);
+	check_spends!(txs[0], node_b_commit_tx);
+
+	// PaymentSent and PaymentPathSuccessful events.
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+}
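
For reviewers, the patch boils down to two symmetric pieces: prune (or split) locktimed packages whenever a confirmed transaction spends one of their inputs, and send a reorged-out claim back into `locktimed_packages` when its locktime is still in the future. Below is a minimal, self-contained sketch of that logic. `OutPoint`, `Package`, `split_package`, and the two free functions here are simplified stand-ins chosen for illustration, not LDK's actual `PackageTemplate`/`OnchainTxHandler` API:

use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
struct OutPoint {
	txid: [u8; 32],
	vout: u32,
}

#[derive(Clone, Debug)]
struct Package {
	outpoints: Vec<OutPoint>,
}

impl Package {
	// Stand-in for `PackageTemplate::split_package`: if this package claims
	// `outpoint`, split that input out into its own single-input package.
	fn split_package(&mut self, outpoint: &OutPoint) -> Option<Package> {
		let idx = self.outpoints.iter().position(|o| o == outpoint)?;
		let split = self.outpoints.remove(idx);
		Some(Package { outpoints: vec![split] })
	}
}

// Prune (or split) locktimed packages whose inputs were spent by a confirmed
// transaction, returning the claimed material so the caller can record it
// (LDK tracks it via `ContentiousOutpoint` events) for restoration on reorg.
fn prune_locktimed_packages(
	locktimed_packages: &mut BTreeMap<u32, Vec<Package>>, spent_outpoint: &OutPoint,
) -> Vec<Package> {
	let mut claimed_outputs_material = Vec::new();
	locktimed_packages.retain(|_locktime, packages| {
		packages.retain_mut(|package| {
			if let Some(p) = package.split_package(spent_outpoint) {
				claimed_outputs_material.push(p);
			}
			!package.outpoints.is_empty()
		});
		!packages.is_empty()
	});
	claimed_outputs_material
}

// On block disconnection, a claimed package whose locktime is still in the
// future goes back into `locktimed_packages`; only a mature package (returned
// as `Some`) should rejoin the pending claim set.
fn restore_on_disconnect(
	locktimed_packages: &mut BTreeMap<u32, Vec<Package>>, package: Package,
	package_locktime: u32, disconnection_height: u32,
) -> Option<Package> {
	if package_locktime >= disconnection_height {
		locktimed_packages.entry(package_locktime).or_default().push(package);
		None
	} else {
		Some(package)
	}
}

Splitting rather than dropping matters here: a locktimed aggregate may share only one input with the confirming transaction, and its untouched inputs must stay queued under their locktime.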