diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index a8d055a9c5b..c4b20e72e4b 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -706,6 +706,17 @@ pub(crate) enum ChannelMonitorUpdateStep {
 	ReleasePaymentComplete {
 		htlc: SentHTLCId,
 	},
+	/// When an [`Event::PaymentClaimed`] is processed by the user, we need to track that so we don't
+	/// keep regenerating the event redundantly on startup.
+	///
+	/// This will remove the HTLC from [`ChannelMonitor::get_stored_preimages`].
+	///
+	/// Note that this is only generated for closed channels -- if the channel is open, the inbound
+	/// payment is pruned automatically when the HTLC is no longer present in any unrevoked
+	/// commitment transaction.
+	InboundPaymentClaimed {
+		payment_hash: PaymentHash,
+	},
 }
 
 impl ChannelMonitorUpdateStep {
@@ -723,6 +734,7 @@ impl ChannelMonitorUpdateStep {
 			ChannelMonitorUpdateStep::RenegotiatedFunding { .. } => "RenegotiatedFunding",
 			ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => "RenegotiatedFundingLocked",
 			ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => "ReleasePaymentComplete",
+			ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => "InboundPaymentClaimed",
 		}
 	}
 }
@@ -769,6 +781,9 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
 		(3, htlc_data, required),
 		(5, claimed_htlcs, required_vec),
 	},
+	(9, InboundPaymentClaimed) => {
+		(1, payment_hash, required),
+	},
 	(10, RenegotiatedFunding) => {
 		(1, channel_parameters, (required: ReadableArgs, None)),
 		(3, holder_commitment_tx, required),
@@ -1342,6 +1357,10 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 	/// this and we'll store the set of fully resolved payments here.
 	htlcs_resolved_to_user: HashSet<SentHTLCId>,
 
+	/// The set of inbound payments for which the user has processed an [`Event::PaymentClaimed`].
+	/// This is used to avoid regenerating the event redundantly on restart for closed channels.
+	inbound_payments_claimed: HashSet<PaymentHash>,
+
 	/// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
 	/// These are tracked explicitly to ensure that we don't generate the same events redundantly
 	/// if users duplicatively confirm old transactions. Specifically for transactions claiming a
@@ -1755,6 +1774,7 @@ pub(crate) fn write_chanmon_internal(
 		(34, channel_monitor.alternative_funding_confirmed, option),
 		(35, channel_monitor.is_manual_broadcast, required),
 		(37, channel_monitor.funding_seen_onchain, required),
+		(39, channel_monitor.inbound_payments_claimed, required),
 	});
 
 	Ok(())
@@ -1954,6 +1974,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			confirmed_commitment_tx_counterparty_output: None,
 			htlcs_resolved_on_chain: Vec::new(),
 			htlcs_resolved_to_user: new_hash_set(),
+			inbound_payments_claimed: new_hash_set(),
 			spendable_txids_confirmed: Vec::new(),
 
 			best_block,
@@ -3249,6 +3270,20 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 	pub(crate) fn get_stored_preimages(
 		&self,
+	) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
+		let inner = self.inner.lock().unwrap();
+		inner
+			.payment_preimages
+			.iter()
+			.filter(|(hash, _)| !inner.inbound_payments_claimed.contains(*hash))
+			.map(|(hash, value)| (*hash, value.clone()))
+			.collect()
+	}
+
+	/// Used in tests to verify preimage propagation.
+	#[cfg(test)]
+	pub(crate) fn test_get_all_stored_preimages(
+		&self,
 	) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
 		self.inner.lock().unwrap().payment_preimages.clone()
 	}
@@ -4150,6 +4185,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			assert_eq!(updates.updates.len(), 1);
 			match updates.updates[0] {
 				ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
+				ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {},
 				ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
 				// We should have already seen a `ChannelForceClosed` update if we're trying to
 				// provide a preimage at this point.
@@ -4281,6 +4317,10 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 					log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved");
 					self.htlcs_resolved_to_user.insert(*htlc);
 				},
+				ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash } => {
+					log_trace!(logger, "Inbound payment {} claimed", payment_hash);
+					self.inbound_payments_claimed.insert(*payment_hash);
+				},
 			}
 		}
@@ -4313,6 +4353,7 @@
 				ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
 				ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
 				ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
+				ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {},
 			}
 		}
@@ -6504,6 +6545,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 		let mut funding_spend_confirmed = None;
 		let mut htlcs_resolved_on_chain = Some(Vec::new());
 		let mut htlcs_resolved_to_user = Some(new_hash_set());
+		let mut inbound_payments_claimed = Some(new_hash_set());
 		let mut funding_spend_seen = Some(false);
 		let mut counterparty_node_id = None;
 		let mut confirmed_commitment_tx_counterparty_output = None;
@@ -6543,6 +6585,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			(34, alternative_funding_confirmed, option),
 			(35, is_manual_broadcast, (default_value, false)),
 			(37, funding_seen_onchain, (default_value, true)),
+			(39, inbound_payments_claimed, option),
 		});
 		// Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so
 		// we can use it to determine if this monitor was last written by LDK 0.1 or later.
@@ -6708,6 +6751,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			confirmed_commitment_tx_counterparty_output,
 			htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
 			htlcs_resolved_to_user: htlcs_resolved_to_user.unwrap(),
+			inbound_payments_claimed: inbound_payments_claimed.unwrap(),
 			spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),
 
 			best_block,
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index cd32d219b93..b43320ce324 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -4598,6 +4598,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	// available.
 	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
 	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
+	check_added_monitors(&nodes[1], 1);
 }
 
 #[test]
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index ada27af749f..ea51ebf02b4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1475,6 +1475,21 @@ impl_writeable_tlv_based!(PaymentCompleteUpdate, {
 	(7, htlc_id, required),
 });
 
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct InboundPaymentClaimedUpdate {
+	pub counterparty_node_id: PublicKey,
+	pub channel_funding_outpoint: OutPoint,
+	pub channel_id: ChannelId,
+	pub payment_hash: PaymentHash,
+}
+
+impl_writeable_tlv_based!(InboundPaymentClaimedUpdate, {
+	(1, counterparty_node_id, required),
+	(3, channel_funding_outpoint, required),
+	(5, channel_id, required),
+	(7, payment_hash, required),
+});
+
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum EventCompletionAction {
 	ReleaseRAAChannelMonitorUpdate {
@@ -1489,6 +1504,12 @@ pub(crate) enum EventCompletionAction {
 	/// fully-resolved in the [`ChannelMonitor`], which we do via this action.
 	/// Note that this action will be dropped on downgrade to LDK prior to 0.2!
 	ReleasePaymentCompleteChannelMonitorUpdate(PaymentCompleteUpdate),
+
+	/// When a payment's resolution is communicated to the downstream logic via
+	/// [`Event::PaymentClaimed`], we may want to mark the payment as fully-resolved in the
+	/// [`ChannelMonitor`], which we do via this action.
+	/// Note that this action will be dropped on downgrade to LDK prior to 0.3!
+	InboundPaymentClaimedChannelMonitorUpdate(InboundPaymentClaimedUpdate),
 }
 
 impl_writeable_tlv_based_enum!(EventCompletionAction,
 	(0, ReleaseRAAChannelMonitorUpdate) => {
@@ -1500,8 +1521,9 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 			}
 			ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap())
 		})),
-	}
+	},
 	{1, ReleasePaymentCompleteChannelMonitorUpdate} => (),
+	{3, InboundPaymentClaimedChannelMonitorUpdate} => (),
 );
 
 /// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
@@ -9982,11 +10004,34 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let action = if let Some((outpoint, counterparty_node_id, channel_id)) =
 			durable_preimage_channel
 		{
-			Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-				channel_funding_outpoint: Some(outpoint),
-				counterparty_node_id,
-				channel_id,
-			})
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			let is_channel_closed = per_peer_state
+				.get(&counterparty_node_id)
+				.map(|peer_state_mutex| {
+					let peer_state = peer_state_mutex.lock().unwrap();
+					!peer_state.channel_by_id.contains_key(&channel_id)
+				})
+				.unwrap_or(true);
+			// For open channels, we use ReleaseRAAChannelMonitorUpdate to maintain the blocking
+			// behavior (RAA updates are blocked until the PaymentClaimed event is handled).
+			// For closed channels, we use InboundPaymentClaimedChannelMonitorUpdate to persist
+			// that the PaymentClaimed event has been handled, preventing regeneration on restart.
+			if is_channel_closed {
+				Some(EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(
+					InboundPaymentClaimedUpdate {
+						channel_funding_outpoint: outpoint,
+						counterparty_node_id,
+						channel_id,
+						payment_hash,
+					},
+				))
+			} else {
+				Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+					channel_funding_outpoint: Some(outpoint),
+					counterparty_node_id,
+					channel_id,
+				})
+			}
 		} else {
 			None
 		};
@@ -14809,56 +14854,85 @@ impl<
 						htlc_id,
 					},
 				) => {
-					let per_peer_state = self.per_peer_state.read().unwrap();
-					let mut peer_state_lock = per_peer_state
-						.get(&counterparty_node_id)
-						.map(|state| state.lock().unwrap())
-						.expect("Channels originating a payment resolution must have peer state");
-					let peer_state = &mut *peer_state_lock;
-					let update_id = peer_state
-						.closed_channel_monitor_update_ids
-						.get_mut(&channel_id)
-						.expect("Channels originating a payment resolution must have a monitor");
-					// Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`.
-					*update_id = update_id.saturating_add(1);
-
-					let update = ChannelMonitorUpdate {
-						update_id: *update_id,
-						channel_id: Some(channel_id),
-						updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
-							htlc: htlc_id,
-						}],
-					};
-
-					let during_startup =
-						!self.background_events_processed_since_startup.load(Ordering::Acquire);
-					if during_startup {
-						let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-							counterparty_node_id,
-							funding_txo: channel_funding_outpoint,
-							channel_id,
-							update,
-						};
-						self.pending_background_events.lock().unwrap().push(event);
-					} else {
-						if let Some(actions) = self.handle_post_close_monitor_update(
-							&mut peer_state.in_flight_monitor_updates,
-							&mut peer_state.monitor_update_blocked_actions,
-							channel_funding_outpoint,
-							update,
-							counterparty_node_id,
-							channel_id,
-						) {
-							mem::drop(peer_state_lock);
-							mem::drop(per_peer_state);
-							self.handle_monitor_update_completion_actions(actions);
-						}
-					}
+					let update_step =
+						ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id };
+					self.handle_closed_channel_monitor_update_for_event(
+						counterparty_node_id,
+						channel_funding_outpoint,
+						channel_id,
+						update_step,
+					);
+				},
+				EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(
+					InboundPaymentClaimedUpdate {
+						counterparty_node_id,
+						channel_funding_outpoint,
+						channel_id,
+						payment_hash,
+					},
+				) => {
+					let update_step =
+						ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash };
+					self.handle_closed_channel_monitor_update_for_event(
+						counterparty_node_id,
+						channel_funding_outpoint,
+						channel_id,
+						update_step,
+					);
 				},
 			}
 		}
 	}
 
+	/// Helper for handling closed-channel monitor updates triggered by [`EventCompletionAction`]s.
+	fn handle_closed_channel_monitor_update_for_event(
+		&self, counterparty_node_id: PublicKey, funding_outpoint: OutPoint, channel_id: ChannelId,
+		update_step: ChannelMonitorUpdateStep,
+	) {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let mut peer_state_lock = per_peer_state
+			.get(&counterparty_node_id)
+			.map(|state| state.lock().unwrap())
+			.expect("Channels originating a payment resolution must have peer state");
+		let peer_state = &mut *peer_state_lock;
+		let update_id = peer_state
+			.closed_channel_monitor_update_ids
+			.get_mut(&channel_id)
+			.expect("Channels originating a payment resolution must have a monitor");
+		// Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`.
+		*update_id = update_id.saturating_add(1);
+
+		let update = ChannelMonitorUpdate {
+			update_id: *update_id,
+			channel_id: Some(channel_id),
+			updates: vec![update_step],
+		};
+
+		let during_startup =
+			!self.background_events_processed_since_startup.load(Ordering::Acquire);
+		if during_startup {
+			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+				counterparty_node_id,
+				funding_txo: funding_outpoint,
+				channel_id,
+				update,
+			};
+			self.pending_background_events.lock().unwrap().push(event);
+		} else {
+			if let Some(actions) = self.handle_post_close_monitor_update(
+				&mut peer_state.in_flight_monitor_updates,
+				&mut peer_state.monitor_update_blocked_actions,
+				funding_outpoint,
+				update,
+				counterparty_node_id,
+				channel_id,
+			) {
+				mem::drop(peer_state_lock);
+				mem::drop(per_peer_state);
+				self.handle_monitor_update_completion_actions(actions);
+			}
+		}
+	}
 
 	/// Processes any events asynchronously in the order they were generated since the last call
 	/// using the given event handler.
 	///
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 18a976871a6..0527010102b 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -225,6 +225,9 @@ fn archive_fully_resolved_monitors() {
 	nodes[1].node.claim_funds(payment_preimage);
 	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
+	// Processing PaymentClaimed on a closed channel generates a monitor update to mark the claim as
+	// resolved to the user.
+	check_added_monitors(&nodes[1], 1);
 
 	let htlc_claim_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(htlc_claim_tx.len(), 1);
@@ -3282,22 +3285,29 @@ fn test_update_replay_panics() {
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+	// Processing PaymentClaimed on a closed channel generates a monitor update to mark the claim as
+	// resolved to the user.
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[1].node.claim_funds(payment_preimage_2);
 	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
+	check_added_monitors(&nodes[1], 1);
 
 	let mut updates = nodes[1].chain_monitor.monitor_updates.lock().unwrap().get_mut(&chan.2).unwrap().split_off(0);
 
-	// Update `monitor` until there's just one normal updates, an FC update, and a post-FC claim
-	// update pending
-	for update in updates.drain(..updates.len() - 4) {
+	// Update `monitor` until there's just one normal update, an FC update, and the post-FC claim
+	// and InboundPaymentClaimed updates pending.
+	// Updates are: [normal, FC, preimage1, inbound_claimed1, preimage2, inbound_claimed2]
+	for update in updates.drain(..updates.len() - 6) {
 		monitor.update_monitor(&update, &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap();
 	}
-	assert_eq!(updates.len(), 4);
+	assert_eq!(updates.len(), 6);
 	assert!(matches!(updates[1].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
 	assert!(matches!(updates[2].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. }));
-	assert!(matches!(updates[3].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. }));
+	assert!(matches!(updates[3].updates[0], ChannelMonitorUpdateStep::InboundPaymentClaimed { .. }));
+	assert!(matches!(updates[4].updates[0], ChannelMonitorUpdateStep::PaymentPreimage { .. }));
+	assert!(matches!(updates[5].updates[0], ChannelMonitorUpdateStep::InboundPaymentClaimed { .. }));
 
 	// Ensure applying the force-close update skipping the last normal update fails
 	let poisoned_monitor = monitor.clone();
@@ -3384,11 +3394,13 @@ fn test_claim_event_never_handled() {
 	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan.2).encode();
 	let mons = &[&chan_0_monitor_serialized[..]];
 	reload_node!(nodes[1], &init_node_ser, mons, persister, new_chain_mon, nodes_1_reload);
+	check_added_monitors(&nodes[1], 0);
 
 	expect_payment_claimed!(nodes[1], payment_hash_a, 1_000_000);
-	// The reload logic spuriously generates a redundant payment preimage-containing
-	// `ChannelMonitorUpdate`.
-	check_added_monitors(&nodes[1], 2);
+	// The reload logic spuriously generates 2 redundant payment preimage-containing
+	// `ChannelMonitorUpdate`s, plus we get a monitor update once the PaymentClaimed event is
+	// processed.
+	check_added_monitors(&nodes[1], 3);
 }
 
 fn do_test_lost_preimage_monitor_events(on_counterparty_tx: bool, p2a_anchor: bool) {
@@ -3863,6 +3875,7 @@ fn test_ladder_preimage_htlc_claims() {
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, &[node_id_0], 1_000_000);
 
 	nodes[1].node.claim_funds(payment_preimage1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(&nodes[1], payment_hash1, 1_000_000);
 	check_added_monitors(&nodes[1], 1);
 
@@ -3884,6 +3897,7 @@ fn test_ladder_preimage_htlc_claims() {
 	check_added_monitors(&nodes[0], 1);
 
 	nodes[1].node.claim_funds(payment_preimage2);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(&nodes[1], payment_hash2, 1_000_000);
 	check_added_monitors(&nodes[1], 1);
 
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index bb730f8fba8..bc78c2a50a1 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -853,16 +853,20 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_rest
 	if persist_both_monitors {
 		if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
 		if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[3] { } else { panic!(); }
-		check_added_monitors(&nodes[3], 4);
+		// 4 monitor updates for the preimages + 1 for the InboundPaymentClaimed update marking the
+		// payment as claimed in the closed channel's monitor.
+		check_added_monitors(&nodes[3], 5);
 	} else {
 		if let Event::PaymentClaimed { amount_msat: 15_000_000, .. } = events[2] { } else { panic!(); }
+		// Only one channel closed; the durable_preimage_channel is the live one, so no extra
+		// InboundPaymentClaimed update is generated.
 		check_added_monitors(&nodes[3], 3);
 	}
 
 	// Now that we've processed background events, the preimage should have been copied into the
 	// non-persisted monitor:
-	assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
-	assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
+	assert!(get_monitor!(nodes[3], chan_id_persisted).test_get_all_stored_preimages().contains_key(&payment_hash));
+	assert!(get_monitor!(nodes[3], chan_id_not_persisted).test_get_all_stored_preimages().contains_key(&payment_hash));
 
 	// On restart, we should also get a duplicate PaymentClaimed event as we persisted the
 	// ChannelManager prior to handling the original one.
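
Reviewer note: the sketch below is appended for illustration only; it is not part of the patch and is not LDK code. `MonitorModel`, `UpdateStep`, `apply_update`, and `stored_preimages` are invented stand-ins that model the dedup mechanism the patch introduces: the monitor keeps a set of payments whose `Event::PaymentClaimed` the user has already processed, and that set filters what `get_stored_preimages` hands back to the `ChannelManager` on startup, so the event is not regenerated for a closed channel.

use std::collections::{HashMap, HashSet};

// Stand-ins for LDK's PaymentHash / PaymentPreimage newtypes.
type PaymentHash = [u8; 32];
type PaymentPreimage = [u8; 32];

// Modeled on the two ChannelMonitorUpdateStep variants the patch touches.
enum UpdateStep {
	PaymentPreimage { payment_hash: PaymentHash, payment_preimage: PaymentPreimage },
	InboundPaymentClaimed { payment_hash: PaymentHash },
}

#[derive(Default)]
struct MonitorModel {
	// Preimages the monitor must remember in order to claim HTLCs on-chain.
	payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
	// Payments whose PaymentClaimed event the user has fully processed.
	inbound_payments_claimed: HashSet<PaymentHash>,
}

impl MonitorModel {
	fn apply_update(&mut self, step: UpdateStep) {
		match step {
			UpdateStep::PaymentPreimage { payment_hash, payment_preimage } => {
				self.payment_preimages.insert(payment_hash, payment_preimage);
			},
			UpdateStep::InboundPaymentClaimed { payment_hash } => {
				// Marking the payment claimed hides it from stored_preimages(), so a
				// restarted manager will not resurface PaymentClaimed for it.
				self.inbound_payments_claimed.insert(payment_hash);
			},
		}
	}

	// Mirrors the filtering get_stored_preimages() gains in the patch: only
	// preimages for not-yet-claimed payments are handed back on startup.
	fn stored_preimages(&self) -> HashMap<PaymentHash, PaymentPreimage> {
		self.payment_preimages
			.iter()
			.filter(|(hash, _)| !self.inbound_payments_claimed.contains(*hash))
			.map(|(hash, preimage)| (*hash, *preimage))
			.collect()
	}
}

fn main() {
	let mut monitor = MonitorModel::default();
	let hash = [1u8; 32];
	let step = UpdateStep::PaymentPreimage { payment_hash: hash, payment_preimage: [2u8; 32] };
	monitor.apply_update(step);
	assert!(monitor.stored_preimages().contains_key(&hash));

	// Once the user handles Event::PaymentClaimed on a closed channel, the manager
	// queues an InboundPaymentClaimed monitor update (via the new
	// InboundPaymentClaimedChannelMonitorUpdate completion action)...
	monitor.apply_update(UpdateStep::InboundPaymentClaimed { payment_hash: hash });
	// ...after which a restart no longer sees the preimage and no duplicate
	// PaymentClaimed event is generated.
	assert!(!monitor.stored_preimages().contains_key(&hash));
}

This also shows why the patch adds test_get_all_stored_preimages(): tests that verify preimage propagation need the unfiltered map, while the filtered accessor is what production startup reads.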