Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions lightning/src/chain/channelmonitor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -706,6 +706,17 @@ pub(crate) enum ChannelMonitorUpdateStep {
ReleasePaymentComplete {
htlc: SentHTLCId,
},
/// When an [`Event::PaymentClaimed`] is processed by the user, we need to track that so we don't
/// keep regenerating the event redundantly on startup.
///
/// This will remove the HTLC from [`ChannelMonitor::get_stored_preimages`].
///
/// Note that this is only generated for closed channels -- if the channel is open, the inbound
/// payment is pruned automatically when the HTLC is no longer present in any unrevoked
/// commitment transaction.
InboundPaymentClaimed {
payment_hash: PaymentHash,
},
}

impl ChannelMonitorUpdateStep {
Expand All @@ -723,6 +734,7 @@ impl ChannelMonitorUpdateStep {
ChannelMonitorUpdateStep::RenegotiatedFunding { .. } => "RenegotiatedFunding",
ChannelMonitorUpdateStep::RenegotiatedFundingLocked { .. } => "RenegotiatedFundingLocked",
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => "ReleasePaymentComplete",
ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => "InboundPaymentClaimed",
}
}
}
Expand Down Expand Up @@ -769,6 +781,9 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
(3, htlc_data, required),
(5, claimed_htlcs, required_vec),
},
(9, InboundPaymentClaimed) => {
(1, payment_hash, required),
},
(10, RenegotiatedFunding) => {
(1, channel_parameters, (required: ReadableArgs, None)),
(3, holder_commitment_tx, required),
Expand Down Expand Up @@ -1342,6 +1357,10 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
/// this and we'll store the set of fully resolved payments here.
htlcs_resolved_to_user: HashSet<SentHTLCId>,

/// The set of inbound payments for which the user has processed an [`Event::PaymentClaimed`].
/// This is used to avoid regenerating the event redundantly on restart for closed channels.
inbound_payments_claimed: HashSet<PaymentHash>,

/// The set of `SpendableOutput` events which we have already passed upstream to be claimed.
/// These are tracked explicitly to ensure that we don't generate the same events redundantly
/// if users duplicatively confirm old transactions. Specifically for transactions claiming a
Expand Down Expand Up @@ -1755,6 +1774,7 @@ pub(crate) fn write_chanmon_internal<Signer: EcdsaChannelSigner, W: Writer>(
(34, channel_monitor.alternative_funding_confirmed, option),
(35, channel_monitor.is_manual_broadcast, required),
(37, channel_monitor.funding_seen_onchain, required),
(39, channel_monitor.inbound_payments_claimed, required),
});

Ok(())
Expand Down Expand Up @@ -1954,6 +1974,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
confirmed_commitment_tx_counterparty_output: None,
htlcs_resolved_on_chain: Vec::new(),
htlcs_resolved_to_user: new_hash_set(),
inbound_payments_claimed: new_hash_set(),
spendable_txids_confirmed: Vec::new(),

best_block,
Expand Down Expand Up @@ -3249,6 +3270,20 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {

pub(crate) fn get_stored_preimages(
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure if we want to also rename this method

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yea, for sure.

&self,
) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
let inner = self.inner.lock().unwrap();
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, can we do this on a per-PaymentClaimDetails basis instead? In theory we can have two payments with the same hash/preimage.

inner
.payment_preimages
.iter()
.filter(|(hash, _)| !inner.inbound_payments_claimed.contains(*hash))
.map(|(hash, value)| (*hash, value.clone()))
.collect()
}

/// Used in tests to verify preimage propagation.
///
/// Unlike [`Self::get_stored_preimages`], this returns *all* stored preimages without
/// filtering out entries in `inbound_payments_claimed` (i.e. payments whose
/// [`Event::PaymentClaimed`] has already been handled by the user).
#[cfg(test)]
pub(crate) fn test_get_all_stored_preimages(
&self,
) -> HashMap<PaymentHash, (PaymentPreimage, Vec<PaymentClaimDetails>)> {
// Clone the full, unfiltered preimage map under the inner lock.
self.inner.lock().unwrap().payment_preimages.clone()
}
Expand Down Expand Up @@ -4150,6 +4185,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
assert_eq!(updates.updates.len(), 1);
match updates.updates[0] {
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {},
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
// We should have already seen a `ChannelForceClosed` update if we're trying to
// provide a preimage at this point.
Expand Down Expand Up @@ -4281,6 +4317,10 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
log_trace!(logger, "HTLC {htlc:?} permanently and fully resolved");
self.htlcs_resolved_to_user.insert(*htlc);
},
ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash } => {
log_trace!(logger, "Inbound payment {} claimed", payment_hash);
self.inbound_payments_claimed.insert(*payment_hash);
},
}
}

Expand Down Expand Up @@ -4313,6 +4353,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
ChannelMonitorUpdateStep::PaymentPreimage { .. } => {},
ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
ChannelMonitorUpdateStep::ReleasePaymentComplete { .. } => {},
ChannelMonitorUpdateStep::InboundPaymentClaimed { .. } => {},
}
}

Expand Down Expand Up @@ -6504,6 +6545,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
let mut funding_spend_confirmed = None;
let mut htlcs_resolved_on_chain = Some(Vec::new());
let mut htlcs_resolved_to_user = Some(new_hash_set());
let mut inbound_payments_claimed = Some(new_hash_set());
let mut funding_spend_seen = Some(false);
let mut counterparty_node_id = None;
let mut confirmed_commitment_tx_counterparty_output = None;
Expand Down Expand Up @@ -6543,6 +6585,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
(34, alternative_funding_confirmed, option),
(35, is_manual_broadcast, (default_value, false)),
(37, funding_seen_onchain, (default_value, true)),
(39, inbound_payments_claimed, option),
});
// Note that `payment_preimages_with_info` was added (and is always written) in LDK 0.1, so
// we can use it to determine if this monitor was last written by LDK 0.1 or later.
Expand Down Expand Up @@ -6708,6 +6751,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
confirmed_commitment_tx_counterparty_output,
htlcs_resolved_on_chain: htlcs_resolved_on_chain.unwrap(),
htlcs_resolved_to_user: htlcs_resolved_to_user.unwrap(),
inbound_payments_claimed: inbound_payments_claimed.unwrap(),
spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),

best_block,
Expand Down
1 change: 1 addition & 0 deletions lightning/src/ln/chanmon_update_fail_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4598,6 +4598,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
// available.
nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
check_added_monitors(&nodes[1], 1);
}

#[test]
Expand Down
176 changes: 125 additions & 51 deletions lightning/src/ln/channelmanager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1475,6 +1475,21 @@ impl_writeable_tlv_based!(PaymentCompleteUpdate, {
(7, htlc_id, required),
});

/// The information needed to generate a [`ChannelMonitorUpdateStep::InboundPaymentClaimed`]
/// update for a (closed) channel's [`ChannelMonitor`], persisting that the user has processed
/// the [`Event::PaymentClaimed`] for an inbound payment so the event is not regenerated on
/// restart.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct InboundPaymentClaimedUpdate {
/// The node id of the counterparty on the channel the payment was received over.
pub counterparty_node_id: PublicKey,
/// The funding outpoint of the channel, used to locate its monitor.
pub channel_funding_outpoint: OutPoint,
/// The id of the channel the payment was received over.
pub channel_id: ChannelId,
/// The hash of the payment whose `PaymentClaimed` event was handled.
pub payment_hash: PaymentHash,
}

// Serialized as TLVs so the struct can gain fields in a backwards-compatible way.
impl_writeable_tlv_based!(InboundPaymentClaimedUpdate, {
(1, counterparty_node_id, required),
(3, channel_funding_outpoint, required),
(5, channel_id, required),
(7, payment_hash, required),
});

#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum EventCompletionAction {
ReleaseRAAChannelMonitorUpdate {
Expand All @@ -1489,6 +1504,12 @@ pub(crate) enum EventCompletionAction {
/// fully-resolved in the [`ChannelMonitor`], which we do via this action.
/// Note that this action will be dropped on downgrade to LDK prior to 0.2!
ReleasePaymentCompleteChannelMonitorUpdate(PaymentCompleteUpdate),

/// When a payment's resolution is communicated to the downstream logic via
/// [`Event::PaymentClaimed`], we may want to mark the payment as fully-resolved in the
/// [`ChannelMonitor`], which we do via this action.
/// Note that this action will be dropped on downgrade to LDK prior to 0.3!
InboundPaymentClaimedChannelMonitorUpdate(InboundPaymentClaimedUpdate),
}
impl_writeable_tlv_based_enum!(EventCompletionAction,
(0, ReleaseRAAChannelMonitorUpdate) => {
Expand All @@ -1500,8 +1521,9 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
}
ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap())
})),
}
},
{1, ReleasePaymentCompleteChannelMonitorUpdate} => (),
{3, InboundPaymentClaimedChannelMonitorUpdate} => (),
);

/// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
Expand Down Expand Up @@ -9982,11 +10004,34 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
let action = if let Some((outpoint, counterparty_node_id, channel_id)) =
durable_preimage_channel
{
Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: Some(outpoint),
counterparty_node_id,
channel_id,
})
let per_peer_state = self.per_peer_state.read().unwrap();
let is_channel_closed = per_peer_state
.get(&counterparty_node_id)
.map(|peer_state_mutex| {
let peer_state = peer_state_mutex.lock().unwrap();
!peer_state.channel_by_id.contains_key(&channel_id)
})
.unwrap_or(true);
// For open channels, we use ReleaseRAAChannelMonitorUpdate to maintain the blocking
// behavior (RAA updates are blocked until the PaymentClaimed event is handled).
// For closed channels, we use InboundPaymentClaimedChannelMonitorUpdate to persist
// that the PaymentClaimed event has been handled, preventing regeneration on restart.
if is_channel_closed {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I went with only generating these new monitor updates for claims on closed channels. So after #4462 we will still potentially have increased redundant PaymentClaimed events on restart for claims on open channels, until the claim is removed from all unrevoked commit txs. The approach is currently simple so I'm not sure it's worth fixing that, but open to thoughts.

Some(EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(
InboundPaymentClaimedUpdate {
channel_funding_outpoint: outpoint,
counterparty_node_id,
channel_id,
payment_hash,
},
))
} else {
Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: Some(outpoint),
counterparty_node_id,
channel_id,
})
}
} else {
None
};
Expand Down Expand Up @@ -14809,56 +14854,85 @@ impl<
htlc_id,
},
) => {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect("Channels originating a payment resolution must have peer state");
let peer_state = &mut *peer_state_lock;
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(&channel_id)
.expect("Channels originating a payment resolution must have a monitor");
// Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`.
*update_id = update_id.saturating_add(1);

let update = ChannelMonitorUpdate {
update_id: *update_id,
channel_id: Some(channel_id),
updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
htlc: htlc_id,
}],
};

let during_startup =
!self.background_events_processed_since_startup.load(Ordering::Acquire);
if during_startup {
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: channel_funding_outpoint,
channel_id,
update,
};
self.pending_background_events.lock().unwrap().push(event);
} else {
if let Some(actions) = self.handle_post_close_monitor_update(
&mut peer_state.in_flight_monitor_updates,
&mut peer_state.monitor_update_blocked_actions,
channel_funding_outpoint,
update,
counterparty_node_id,
channel_id,
) {
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(actions);
}
}
let update_step =
ChannelMonitorUpdateStep::ReleasePaymentComplete { htlc: htlc_id };
self.handle_closed_channel_monitor_update_for_event(
counterparty_node_id,
channel_funding_outpoint,
channel_id,
update_step,
);
},
EventCompletionAction::InboundPaymentClaimedChannelMonitorUpdate(
InboundPaymentClaimedUpdate {
counterparty_node_id,
channel_funding_outpoint,
channel_id,
payment_hash,
},
) => {
let update_step =
ChannelMonitorUpdateStep::InboundPaymentClaimed { payment_hash };
self.handle_closed_channel_monitor_update_for_event(
counterparty_node_id,
channel_funding_outpoint,
channel_id,
update_step,
);
},
}
}
}

/// Helper for handling closed-channel monitor updates triggered by [`EventCompletionAction`]s.
///
/// Bumps the closed channel's monitor update id and applies `update_step` to its
/// [`ChannelMonitor`] — either by queueing a [`BackgroundEvent`] for replay (if we have not
/// yet finished processing background events after startup) or by persisting the update
/// immediately via `handle_post_close_monitor_update`.
fn handle_closed_channel_monitor_update_for_event(
&self, counterparty_node_id: PublicKey, funding_outpoint: OutPoint, channel_id: ChannelId,
update_step: ChannelMonitorUpdateStep,
) {
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock = per_peer_state
.get(&counterparty_node_id)
.map(|state| state.lock().unwrap())
.expect("Channels originating a payment resolution must have peer state");
let peer_state = &mut *peer_state_lock;
// Closed channels track their latest monitor update id in per-peer state rather than on
// a live channel object.
let update_id = peer_state
.closed_channel_monitor_update_ids
.get_mut(&channel_id)
.expect("Channels originating a payment resolution must have a monitor");
// Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`.
*update_id = update_id.saturating_add(1);

let update = ChannelMonitorUpdate {
update_id: *update_id,
channel_id: Some(channel_id),
updates: vec![update_step],
};

let during_startup =
!self.background_events_processed_since_startup.load(Ordering::Acquire);
if during_startup {
// Monitor updates cannot be applied directly during startup; queue the update so it
// is replayed once background-event processing runs.
let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: funding_outpoint,
channel_id,
update,
};
self.pending_background_events.lock().unwrap().push(event);
} else {
if let Some(actions) = self.handle_post_close_monitor_update(
&mut peer_state.in_flight_monitor_updates,
&mut peer_state.monitor_update_blocked_actions,
funding_outpoint,
update,
counterparty_node_id,
channel_id,
) {
// Drop both locks (in reverse acquisition order) before handling completion
// actions — presumably they may re-take these locks; confirm against
// `handle_monitor_update_completion_actions`.
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
self.handle_monitor_update_completion_actions(actions);
}
}
}
/// Processes any events asynchronously in the order they were generated since the last call
/// using the given event handler.
///
Expand Down
Loading
Loading