diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 4c195b20a78..9522d06c638 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3008,23 +3008,26 @@ impl ChannelMonitorImpl { (payment_preimage.clone(), payment_info.clone().into_iter().collect()) }); - let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| { - self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { - OnchainEvent::FundingSpendConfirmation { .. } => Some(event.txid), - _ => None, - }) - }); - let confirmed_spend_txid = if let Some(txid) = confirmed_spend_txid { - txid - } else { - return; - }; + let confirmed_spend_info = self.funding_spend_confirmed + .map(|txid| (txid, None)) + .or_else(|| { + self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { + OnchainEvent::FundingSpendConfirmation { .. } => Some((event.txid, Some(event.height))), + _ => None, + }) + }); + let (confirmed_spend_txid, confirmed_spend_height) = + if let Some((txid, height)) = confirmed_spend_info { + (txid, height) + } else { + return; + }; // If the channel is force closed, try to claim the output from this preimage. // First check if a counterparty commitment transaction has been broadcasted: macro_rules! claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs); + let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs, confirmed_spend_height); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); } @@ -3542,7 +3545,7 @@ impl ChannelMonitorImpl { // First, process non-htlc outputs (to_holder & to_counterparty) for (idx, outp) in tx.output.iter().enumerate() { if outp.script_pubkey == revokeable_p2wsh { - let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx()); + let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx(), height); let justice_package = PackageTemplate::build_package( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3563,7 +3566,7 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! 
return (claimable_outpoints, to_counterparty_output_info); } - let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features); + let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features, height); let counterparty_spendable_height = if htlc.offered { htlc.cltv_expiry } else { @@ -3617,7 +3620,7 @@ impl ChannelMonitorImpl { (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) ), logger); let (htlc_claim_reqs, counterparty_output_info) = - self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option); + self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option, Some(height)); to_counterparty_output_info = counterparty_output_info; for req in htlc_claim_reqs { claimable_outpoints.push(req); @@ -3628,7 +3631,7 @@ impl ChannelMonitorImpl { } /// Returns the HTLC claim package templates and the counterparty output info - fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>) + fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>, confirmation_height: Option<u32>) -> (Vec<PackageTemplate>, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -3688,13 +3691,15 @@ impl ChannelMonitorImpl { CounterpartyOfferedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone()) + preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput( CounterpartyReceivedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - htlc.clone(), self.onchain_tx_handler.channel_type_features().clone()) + htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) }; let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry); claimable_outpoints.push(counterparty_package); @@ -3736,7 +3741,7 @@ impl ChannelMonitorImpl { per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv, - false + false, height, ); let justice_package =
PackageTemplate::build_package( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3765,7 +3770,7 @@ impl ChannelMonitorImpl { if let Some(transaction_output_index) = htlc.transaction_output_index { let (htlc_output, counterparty_spendable_height) = if htlc.offered { let htlc_output = HolderHTLCOutput::build_offered( - htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone() + htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, conf_height) } else { @@ -3776,7 +3781,7 @@ impl ChannelMonitorImpl { continue; }; let htlc_output = HolderHTLCOutput::build_accepted( - payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone() + payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, htlc.cltv_expiry) }; diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 2a43b006920..ae221c1c61d 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -278,6 +278,9 @@ pub struct OnchainTxHandler { #[cfg(not(test))] claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>, + #[cfg(any(test, feature = "_test_utils"))] + pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>, + #[cfg(not(any(test, feature = "_test_utils")))] locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>, onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>, @@ -862,9 +865,10 @@ impl OnchainTxHandler { // Because fuzzing can cause hash collisions, we can end up with conflicting claim // ids here, so we only assert when not fuzzing. debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none()); - for k in req.outpoints() { - log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout); - self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height)); + for (k, outpoint_confirmation_height) in req.outpoints_and_creation_heights() { + let creation_height = outpoint_confirmation_height.unwrap_or(conf_height); + log_info!(logger, "Registering claiming request for {}:{}, which exists as of height {creation_height}", k.txid, k.vout); + self.claimable_outpoints.insert(k.clone(), (claim_id, creation_height)); } self.pending_claim_requests.insert(claim_id, req); } @@ -969,6 +973,17 @@ impl OnchainTxHandler { panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map"); } } + + // Also remove/split any locktimed packages whose inputs have been spent by this transaction. + self.locktimed_packages.retain(|_locktime, packages|{ + packages.retain_mut(|package| { + if let Some(p) = package.split_package(&inp.previous_output) { + claimed_outputs_material.push(p); + } + !package.outpoints().is_empty() + }); + !packages.is_empty() + }); } for package in claimed_outputs_material.drain(..) { let entry = OnchainEventEntry { @@ -1104,6 +1119,13 @@ impl OnchainTxHandler { //- resurect outpoint back in its claimable set and regenerate tx match entry.event { OnchainEvent::ContentiousOutpoint { package } => { + // We pass 0 to `package_locktime` to get the actual required locktime.
+ let package_locktime = package.package_locktime(0); + if package_locktime >= height { + self.locktimed_packages.entry(package_locktime).or_default().push(package); + continue; + } + if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) { if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) { assert!(request.merge_package(package, height).is_ok()); @@ -1408,6 +1430,7 @@ mod tests { htlc.amount_msat, htlc.cltv_expiry, ChannelTypeFeatures::only_static_remote_key(), + 0, )), 0, )); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index bd6912c21f8..9fe16915be4 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -122,7 +122,7 @@ const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1; /// /// CSV and pubkeys are used as part of a witnessScript redeeming a balance output, amount is used /// as part of the signature hash and revocation secret to generate a satisfying witness. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -132,10 +132,12 @@ pub(crate) struct RevokedOutput { amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: Option<()>, + // Added in LDK 0.1.4/0.2 and always set since. + outpoint_confirmation_height: Option<u32>, } impl RevokedOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool, outpoint_confirmation_height: u32) -> Self { RevokedOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -144,13 +146,15 @@ impl RevokedOutput { weight: WEIGHT_REVOKED_OUTPUT, amount, on_counterparty_tx_csv, - is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None } + is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None }, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -168,7 +172,7 @@ impl_writeable_tlv_based!(RevokedOutput, { /// /// CSV is used as part of a witnessScript redeeming a balance output, amount is used as part /// of the signature hash and revocation secret to generate a satisfying witness. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -177,10 +181,12 @@ pub(crate) struct RevokedHTLCOutput { weight: u64, amount: u64, htlc: HTLCOutputInCommitment, + // Added in LDK 0.1.4/0.2 and always set since.
+ outpoint_confirmation_height: Option<u32>, } impl RevokedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { let weight = if htlc.offered { weight_revoked_offered_htlc(channel_type_features) } else { weight_revoked_received_htlc(channel_type_features) }; RevokedHTLCOutput { per_commitment_point, @@ -189,13 +195,15 @@ impl RevokedHTLCOutput { per_commitment_key, weight, amount, - htlc + htlc, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedHTLCOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -212,7 +220,7 @@ impl_writeable_tlv_based!(RevokedHTLCOutput, { /// The preimage is used as part of the witness. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyOfferedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + // Added in LDK 0.1.4/0.2 and always set since.
+ outpoint_confirmation_height: Option<u32>, } impl CounterpartyOfferedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option<u32>) -> Self { CounterpartyOfferedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -231,6 +241,7 @@ impl CounterpartyOfferedHTLCOutput { preimage, htlc, channel_type_features, + outpoint_confirmation_height, } } } @@ -240,6 +251,7 @@ impl Writeable for CounterpartyOfferedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.preimage, required), @@ -260,9 +272,11 @@ impl Readable for CounterpartyOfferedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, preimage, required), @@ -279,7 +293,8 @@ impl Readable for CounterpartyOfferedHTLCOutput { counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), preimage: preimage.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -290,23 +305,25 @@ impl Readable for CounterpartyOfferedHTLCOutput { /// witnessScript. /// /// Note that on upgrades, some features of existing outputs may be missed.
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyReceivedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option<u32>, } impl CounterpartyReceivedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option<u32>) -> Self { CounterpartyReceivedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, htlc, - channel_type_features + channel_type_features, + outpoint_confirmation_height, } } } @@ -316,6 +333,7 @@ impl Writeable for CounterpartyReceivedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.htlc, required), @@ -334,9 +352,11 @@ impl Readable for CounterpartyReceivedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, htlc, required), @@ -351,7 +371,8 @@ impl Readable for CounterpartyReceivedHTLCOutput { counterparty_delayed_payment_base_key: counterparty_delayed_payment_base_key.0.unwrap(), counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -362,31 +383,34 @@ impl Readable for CounterpartyReceivedHTLCOutput { /// Preimage is only included as part of the witness in former case. /// /// Note that on upgrades, some features of existing outputs may be missed.
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderHTLCOutput { preimage: Option<PaymentPreimage>, amount_msat: u64, /// Defaults to 0 for HTLC-Success transactions, which have no expiry cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option<u32>, } impl HolderHTLCOutput { - pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: None, amount_msat, cltv_expiry, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } - pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: Some(preimage), amount_msat, cltv_expiry: 0, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } @@ -396,6 +420,7 @@ impl Writeable for HolderHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.amount_msat, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, self.cltv_expiry, required), (4, self.preimage, option), (6, legacy_deserialization_prevention_marker, option), @@ -412,9 +437,11 @@ impl Readable for HolderHTLCOutput { let mut preimage = None; let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, amount_msat, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, cltv_expiry, required), (4, preimage, option), (6, _legacy_deserialization_prevention_marker, option), @@ -427,7 +454,8 @@ impl Readable for HolderHTLCOutput { amount_msat: amount_msat.0.unwrap(), cltv_expiry: cltv_expiry.0.unwrap(), preimage, - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -437,7 +465,7 @@ impl Readable for HolderHTLCOutput { /// witnessScript is used as part of the witness redeeming the funding utxo. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderFundingOutput { funding_redeemscript: ScriptBuf, pub(crate) funding_amount: Option<u64>, @@ -496,7 +524,7 @@ impl Readable for HolderFundingOutput { /// /// The generic API offers access to an outputs common attributes or allow transformation such as /// finalizing an input claiming the output.
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum PackageSolvingData { RevokedOutput(RevokedOutput), RevokedHTLCOutput(RevokedHTLCOutput), @@ -575,6 +603,35 @@ impl PackageSolvingData { } } + fn input_confirmation_height(&self) -> Option<u32> { + match self { + PackageSolvingData::RevokedOutput(RevokedOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput { + outpoint_confirmation_height, + .. + }) + | PackageSolvingData::CounterpartyReceivedHTLCOutput( + CounterpartyReceivedHTLCOutput { outpoint_confirmation_height, .. }, + ) + | PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput { + outpoint_confirmation_height, + .. + }) => *outpoint_confirmation_height, + // We don't bother to track `HolderFundingOutput`'s creation height as it's the funding + // transaction itself and we build `HolderFundingOutput`s before we actually get the + // commitment transaction confirmed. + PackageSolvingData::HolderFundingOutput(_) => None, + } + } + + #[rustfmt::skip] fn as_tx_input(&self, previous_output: BitcoinOutPoint) -> TxIn { let sequence = match self { PackageSolvingData::RevokedOutput(_) => Sequence::ENABLE_RBF_NO_LOCKTIME, @@ -737,7 +794,7 @@ impl_writeable_tlv_based_enum_legacy!(PackageSolvingData, ; /// That way we avoid claiming in too many discrete transactions while also avoiding /// unnecessarily exposing ourselves to pinning attacks or delaying claims when we could have /// claimed at least part of the available outputs quickly and without risk. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum AggregationCluster { /// Our counterparty can potentially claim this output. Pinnable, @@ -748,7 +805,7 @@ enum AggregationCluster { /// A malleable package might be aggregated with other packages to save on fees. /// A untractable package has been counter-signed and aggregable will break cached counterparty signatures. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum PackageMalleability { Malleable(AggregationCluster), Untractable, @@ -763,7 +820,7 @@ enum PackageMalleability { /// /// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals. /// Failing to confirm a package translate as a loss of funds for the user. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct PackageTemplate { // List of onchain outputs and solving data to generate satisfying witnesses.
inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>, @@ -877,6 +934,12 @@ impl PackageTemplate { pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() } + pub(crate) fn outpoints_and_creation_heights( + &self, + ) -> impl Iterator<Item = (&BitcoinOutPoint, Option<u32>)> { + self.inputs.iter().map(|(o, p)| (o, p.input_confirmation_height())) + } + pub(crate) fn inputs(&self) -> impl ExactSizeIterator<Item = &PackageSolvingData> { self.inputs.iter().map(|(_, i)| i) } @@ -1394,7 +1457,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let dumb_scalar = SecretKey::from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap(); let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); - PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors)) + PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors, 0)) } } } @@ -1407,7 +1470,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: 1_000_000, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies())) + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), 0)) } } } @@ -1420,7 +1483,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: true, amount_msat: $amt, cltv_expiry: $expiry, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features)) + PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features, None)) } } } @@ -1434,7 +1497,7 @@ mod tests { let hash = PaymentHash([1; 32]); let preimage = PaymentPreimage([2;32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: $amt, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features)) + PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features, None)) } } } @@ -1443,7 +1506,7 @@ mod tests { ($features: expr) => { { let preimage = PaymentPreimage([2;32]); - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features)) +
PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features, 0)) } } } @@ -1451,7 +1514,7 @@ mod tests { macro_rules! dumb_offered_htlc_output { ($cltv_expiry: expr, $features: expr) => { { - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features)) + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features, 0)) } } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ad1e6c26b98..657e089d293 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2931,16 +2931,28 @@ fn test_inbound_reload_without_init_mon() { do_test_inbound_reload_without_init_mon(false, false); } -#[test] -fn test_blocked_chan_preimage_release() { +#[derive(PartialEq, Eq)] +enum BlockedUpdateComplMode { + Async, + AtReload, + Sync, +} + +fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode) { // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to // be handled HTLC preimage `ChannelMonitorUpdate`s will still go out. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_mon; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000); @@ -2968,25 +2980,62 @@ fn test_blocked_chan_preimage_release() { expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + if completion_mode != BlockedUpdateComplMode::Sync { + // We used to incorrectly handle monitor update completion in cases where we completed a + // monitor update async or after reload. We test both based on the `completion_mode`. + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2)); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + if completion_mode == BlockedUpdateComplMode::AtReload { + let node_ser = nodes[1].node.encode(); + let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode(); + + let mons = &[&chan_mon_0[..], &chan_mon_1[..]]; + reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload); + + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); + + let mut a_b_reconnect = ReconnectArgs::new(&nodes[0], &nodes[1]); + a_b_reconnect.pending_htlc_claims.1 = 1; + // Note that we will expect no final RAA monitor update in + // `commitment_signed_dance_through_cp_raa` during the reconnect, matching the below case.
+ reconnect_nodes(a_b_reconnect); + reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1])); + } else if completion_mode == BlockedUpdateComplMode::Async { + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_2).unwrap().clone(); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(outpoint, latest_update) + .unwrap(); + } // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); - check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); - assert!(a.is_none()); + // Note that when completing as a side effect of a reload we completed the CS dance in + // `reconnect_nodes` above. + if completion_mode != BlockedUpdateComplMode::AtReload { + nodes[1].node.handle_commitment_signed( + node_a_id, + &as_htlc_fulfill_updates.commitment_signed, + ); + check_added_monitors(&nodes[1], 1); + let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert!(a.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - check_added_monitors(&nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + check_added_monitors(&nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 3, "{events:?}"); if let Event::PaymentSent { .. } = events[0] {} else { panic!(); } if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); } @@ -3004,6 +3053,13 @@ fn test_blocked_chan_preimage_release() { expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } +#[test] +fn test_blocked_chan_preimage_release() { + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::AtReload); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Sync); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Async); +} + fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) { // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages // from the downstream channel, we immediately claim the HTLC on the upstream channel, before diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 96492ef97f2..16803a45bfd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2452,6 +2452,13 @@ impl ChannelContext where SP::Target: SignerProvider { self.latest_monitor_update_id } + pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { + if self.blocked_monitor_updates.is_empty() { + return self.get_latest_monitor_update_id(); + } + self.blocked_monitor_updates[0].update.update_id - 1 + } + pub fn should_announce(&self) -> bool { self.config.announce_for_forwarding } @@ -3209,7 +3216,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// Creates a set of keys for build_commitment_transaction to generate a transaction which we /// will sign and send to our counterparty. 
/// If an Err is returned, it is a ChannelError::Close (for get_funding_created) - fn build_remote_transaction_keys(&self) -> TxCreationKeys { + pub fn build_remote_transaction_keys(&self) -> TxCreationKeys { let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -3767,14 +3774,14 @@ impl ChannelContext where SP::Target: SignerProvider { // committed outbound HTLCs, see below. let mut included_htlcs = 0; for ref htlc in context.pending_inbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat { + if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat { continue } included_htlcs += 1; } for ref htlc in context.pending_outbound_htlcs.iter() { - if htlc.amount_msat / 1000 <= real_dust_limit_success_sat { + if htlc.amount_msat / 1000 < real_dust_limit_success_sat { continue } // We only include outbound HTLCs if it will not be included in their next commitment_signed, @@ -3890,7 +3897,7 @@ impl ChannelContext where SP::Target: SignerProvider { // monitor update to the user, even if we return one). // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. if !self.channel_state.is_pre_funded_state() { - self.latest_monitor_update_id += 1; + self.latest_monitor_update_id = self.get_latest_unblocked_monitor_update_id() + 1; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, counterparty_node_id: Some(self.counterparty_node_id), @@ -5934,6 +5941,7 @@ impl Channel where { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); + assert_eq!(self.blocked_monitor_updates_pending(), 0); // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to // (re-)broadcast the funding transaction as we may have declined to broadcast it when we @@ -7128,8 +7136,7 @@ impl Channel where /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight. pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { - if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); } - self.context.blocked_monitor_updates[0].update.update_id - 1 + self.context.get_latest_unblocked_monitor_update_id() } /// Returns the next blocked monitor update, if one exists, and a bool which indicates a diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b973432056a..a9e14f17f99 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6347,7 +6347,9 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } } else { let update_actions = peer_state.monitor_update_blocked_actions .remove(&channel_id).unwrap_or(Vec::new()); @@ -7625,8 +7627,12 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { if chan.is_awaiting_monitor_update() { - log_trace!(logger, "Channel is open and awaiting update, resuming it"); - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + log_trace!(logger, "Channel is open and awaiting update, resuming it"); + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } else { + log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update"); + } } else { log_trace!(logger, "Channel is open but not awaiting update"); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 04295073861..420978ad5fc 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -335,6 +335,28 @@ fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b } } +pub fn provide_anchor_reserves<'a, 'b, 'c>(nodes: &[Node<'a, 'b, 'c>]) -> Transaction { + let mut output = Vec::with_capacity(nodes.len()); + for node in nodes { + output.push(TxOut { + value: Amount::ONE_BTC, + script_pubkey: node.wallet_source.get_change_script().unwrap(), + }); + } + let tx = Transaction { + version: TxVersion::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output, + }; + let height = nodes[0].best_block_info().1 + 1; + let block = create_dummy_block(nodes[0].best_block_hash(), height, vec![tx.clone()]); + for node in nodes { + do_connect_block_with_consistency_checks(node, block.clone(), false); + } + tx +} + pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) { call_claimable_balances(node); eprintln!("Disconnecting {} blocks using Block Connection Style: {:?}", count, *node.connect_style.borrow()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index fc8bf38519d..2cbf04a40ff 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3329,33 +3329,9 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } - macro_rules! check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { - let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the - // remote commitment transaction. 
- if $htlc_offered { - assert_eq!(node_txn.len(), 2); - for tx in node_txn.iter() { - check_spends!(tx, $commitment_tx); - assert_ne!(tx.lock_time, LockTime::ZERO); - assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output - } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - } else { - assert_eq!(node_txn.len(), 1); - check_spends!(node_txn[0], $commitment_tx); - assert_ne!(node_txn[0].lock_time, LockTime::ZERO); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment - assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); - } - node_txn.clear(); - } } - } - // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. - check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); + // nodes[1] does not broadcast its own timeout-claim of the output as nodes[2] just claimed it + // via success. + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); // Broadcast legit commitment tx from A on B's chain // Broadcast preimage tx by B on offered output from A commitment tx on A's chain @@ -3416,7 +3392,17 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } } - check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]); + // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the + // remote commitment transaction. + let mut node_txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(node_txn.len(), 2); + for tx in node_txn.iter() { + check_spends!(tx, node_a_commitment_tx[0]); + assert_ne!(tx.lock_time, LockTime::ZERO); + assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output + } + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); } fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { @@ -11698,3 +11684,293 @@ fn test_funding_signed_event() { nodes[1].node.get_and_clear_pending_msg_events(); } +#[test] +pub fn test_dust_limit_fee_accounting() { + do_test_dust_limit_fee_accounting(false); + do_test_dust_limit_fee_accounting(true); +} + +pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { + // Test that when a channel funder sends HTLCs exactly on the dust limit + // of the funder, the fundee correctly accounts for the additional fee on the + // funder's commitment transaction due to those additional non-dust HTLCs when + // checking for any infrigements on the funder's reserve. 
+ + let channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(); + + let chanmon_cfgs = create_chanmon_cfgs(2); + + let mut default_config = test_default_channel_config(); + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + default_config.manually_accept_inbound_channels = true; + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + // Set a HTLC amount that is equal to the dust limit of the funder + const HTLC_AMT_SAT: u64 = 354; + + const CHANNEL_VALUE_SAT: u64 = 100_000; + + const FEERATE_PER_KW: u32 = 253; + + let commit_tx_fee_sat = + chan_utils::commit_tx_fee_sat(FEERATE_PER_KW, MIN_AFFORDABLE_HTLC_COUNT, &channel_type); + + // By default the reserve is set to 1% or 1000sat, whichever is higher + let channel_reserve_satoshis = 1_000; + + // Set node 0's balance to pay for exactly MIN_AFFORDABLE_HTLC_COUNT non-dust HTLCs on the channel, minus some offset + let node_0_balance_sat = commit_tx_fee_sat + + channel_reserve_satoshis + + 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI + + MIN_AFFORDABLE_HTLC_COUNT as u64 * HTLC_AMT_SAT + - if can_afford { 0 } else { 1 }; + let mut node_1_balance_sat = CHANNEL_VALUE_SAT - node_0_balance_sat; + + let chan_id = create_chan_between_nodes_with_value( + &nodes[0], + &nodes[1], + CHANNEL_VALUE_SAT, + node_1_balance_sat * 1000, + ) + .3; + + { + // Double check the reserve that node 0 has to maintain here + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan_id); + assert_eq!( + chan.context().holder_selected_channel_reserve_satoshis, + channel_reserve_satoshis + ); + } + { + // Double check the dust limit on node 0's commitment transactions; when node 0 + // adds a HTLC, node 1 will check that the fee on node 0's commitment transaction + // does not dip under the node 1 selected reserve. + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_state_lock, peer_state_lock, chan_id); + assert_eq!(chan.context().holder_dust_limit_satoshis, HTLC_AMT_SAT); + } + + // Precompute the route to skip any router complaints when sending the last HTLC + let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], HTLC_AMT_SAT * 1000); + + let mut htlcs = Vec::new(); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT - 1 { + let (_payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], HTLC_AMT_SAT * 1000); + // Grab a snapshot of these HTLCs to manually build the commitment transaction later... 
+ let accepted_htlc = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + // Hard-coded to match the expected value + cltv_expiry: 81, + payment_hash, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc, ())); + } + + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); + + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route_0_1.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret_0_1); + let (onion_payloads, amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( + &route_0_1.paths[0], + HTLC_AMT_SAT * 1000, + &recipient_onion_fields, + cur_height, + &None, + None, + ) + .unwrap(); + let onion_routing_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash_0_1) + .unwrap(); + // Double check the hard-coded value + assert_eq!(cltv_expiry, 81); + let msg = msgs::UpdateAddHTLC { + channel_id: chan_id, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64 - 1, + amount_msat, + payment_hash: payment_hash_0_1, + cltv_expiry, + onion_routing_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + if !can_afford { + let err = "Remote HTLC add would put them under remote reserve value".to_string(); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", &err, 3); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let reason = ClosureReason::ProcessingError { err }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], CHANNEL_VALUE_SAT); + check_added_monitors(&nodes[1], 1); + } else { + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. + + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan = if let ChannelPhase::Funded(chan) = &*channel { + chan + } else { + panic!(); + }; + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + ( + chan_signer + .as_ref() + .release_commitment_secret( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64 + 1, + ) + .unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64, + &secp_ctx, + ) + .unwrap(), + ) + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. 
+ let local_chan_balance = node_0_balance_sat + - HTLC_AMT_SAT * MIN_AFFORDABLE_HTLC_COUNT as u64 + - 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI + - chan_utils::commit_tx_fee_sat( + FEERATE_PER_KW, + MIN_AFFORDABLE_HTLC_COUNT, + &channel_type, + ); + + let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + cltv_expiry, + payment_hash: payment_hash_0_1, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc_info, ())); + + let commitment_number = INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64; + + let res = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let chan_signer = if let ChannelPhase::Funded(chan) = &*channel { + chan.get_signer() + } else { + panic!(); + }; + + let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + commitment_number, + node_1_balance_sat, + local_chan_balance, + channel.context().channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys.funding_pubkey, + channel.context().channel_transaction_parameters.holder_pubkeys.funding_pubkey, + channel.context().build_remote_transaction_keys(), + FEERATE_PER_KW, + &mut htlcs, + &channel.context().channel_transaction_parameters.as_counterparty_broadcastable(), + ); + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment( + &commitment_tx, + Vec::new(), + Vec::new(), + &secp_ctx, + ) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan_id, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. + let raa_msg = msgs::RevokeAndACK { + channel_id: chan_id, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + + // The HTLC actually fails here in `fn validate_commitment_signed` due to a fee spike buffer + // violation. It nonetheless passed all checks in `fn validate_update_add_htlc`. + + //expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::FailedPayment { payment_hash: payment_hash_0_1 }] + ); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log("lightning::ln::channel", + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. 
Rebalancing is required.", raa_msg.channel_id), 1); + + check_added_monitors(&nodes[1], 2); + } +} diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 92b19790be5..d105d69edd2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -13,7 +13,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescr use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; -use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; +use crate::events::bump_transaction::BumpTransactionEvent; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::ln::channel; use crate::ln::types::ChannelId; @@ -462,25 +462,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000); @@ -729,8 +711,9 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { test_spendable_output(&nodes[0], &remote_txn[0], false); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - // After broadcasting the HTLC claim transaction, node A will still consider the HTLC - // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it. + // After confirming the HTLC claim transaction, node A will no longer attempt to claim said + // HTLC, unless the transaction is reorged. However, we'll still report a + // `MaybeTimeoutClaimableHTLC` balance for it until we reach `ANTI_REORG_DELAY` confirmations. mine_transaction(&nodes[0], &b_broadcast_txn[0]); if prev_commitment_tx { expect_payment_path_successful!(nodes[0]); @@ -746,18 +729,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1); let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - // Aggregated claim transaction. 
assert_eq!(a_broadcast_txn.len(), 1); check_spends!(a_broadcast_txn[0], remote_txn[0]); - assert_eq!(a_broadcast_txn[0].input.len(), 2); - assert_ne!(a_broadcast_txn[0].input[0].previous_output.vout, a_broadcast_txn[0].input[1].previous_output.vout); - // a_broadcast_txn [0] and [1] should spend the HTLC outputs of the commitment tx - assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 3_000)); + assert_eq!(a_broadcast_txn[0].input.len(), 1); assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 4_000)); - - // Confirm node B's claim for node A to remove that claim from the aggregated claim transaction. - mine_transaction(&nodes[0], &b_broadcast_txn[0]); - let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); let a_htlc_timeout_tx = a_broadcast_txn.into_iter().last().unwrap(); // Once the HTLC-Timeout transaction confirms, A will no longer consider the HTLC @@ -865,25 +840,7 @@ fn do_test_balances_on_local_commitment_htlcs(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create a single channel with two pending HTLCs from nodes[0] to nodes[1], one which nodes[1] // knows the preimage for, one which it does not. 
@@ -1650,25 +1607,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create some initial channels let (_, _, chan_id, funding_tx) = @@ -1951,16 +1890,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000); @@ -2241,25 +2171,7 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(user_config), Some(user_config), Some(user_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + provide_anchor_reserves(&nodes); // Create a channel from A -> B let (_, _, chan_ab_id, funding_tx_ab) = @@ -2406,6 +2318,8 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, _, chan_id, funding_tx) = create_chan_between_nodes_with_value( &nodes[0], &nodes[1], 1_000_000, 500_000_000 ); @@ -2424,17 +2338,6 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { false, [nodes[1].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[0], 1); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, 
- input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `htlc_tx` on anchors - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - // Set up a helper closure we'll use throughout our test. We should only expect retries without // bumps if fees have not increased after a block has been connected (assuming the height timer // re-evaluates at every block) or after `ChainMonitor::rebroadcast_pending_claims` is called. @@ -2538,6 +2441,8 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value( &nodes, 0, 1, 1_000_000, 500_000_000 ); @@ -2613,16 +2518,6 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { assert_eq!(holder_events.len(), 1); let (commitment_tx, anchor_tx) = match holder_events.pop().unwrap() { Event::BumpTransaction(event) => { - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `anchor_tx` - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); nodes[0].bump_tx_handler.handle_event(&event); let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 2); @@ -2738,6 +2633,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); @@ -2796,18 +2693,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { assert_eq!(events.len(), 2); let mut revoked_commitment_txs = Vec::with_capacity(events.len()); let mut anchor_txs = Vec::with_capacity(events.len()); - for (idx, event) in events.into_iter().enumerate() { - let utxo_value = Amount::ONE_BTC * (idx + 1) as u64; - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `anchor_tx` - value: utxo_value, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, utxo_value); + for event in events { match event { Event::BumpTransaction(event) => nodes[1].bump_tx_handler.handle_event(&event), _ => panic!("Unexpected event"), @@ -3125,20 +3011,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: 
Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Open a channel and route a payment. We'll let it timeout to claim it. let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index b1b4f77c590..56760c510a3 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -10,13 +10,14 @@ //! Further functional tests which test blockchain reorganizations. use crate::chain::chaininterface::LowerBoundedFeeEstimator; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent}; use crate::ln::msgs::{ChannelMessageHandler, Init}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; +use crate::types::payment::PaymentHash; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; @@ -897,3 +898,226 @@ fn test_retries_own_commitment_broadcast_after_reorg() { do_test_retries_own_commitment_broadcast_after_reorg(true, false); do_test_retries_own_commitment_broadcast_after_reorg(true, true); } + +fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool) { + // Previously, we had a bug where if there were two HTLCs which expired at different heights, + // and a counterparty commitment transaction confirmed spending both of them, we'd continually + // rebroadcast attempted HTLC claims against the higher-expiry HTLC forever. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + + // This test relies on being able to consolidate HTLC claims into a single transaction, which + // requires anchors: + let mut config = test_default_channel_config(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.manually_accept_inbound_channels = true; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + + // Route two non-dust HTLCs with different expiry, with a third having the same expiry as the + // second if `use_third_htlc` is set. + let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + connect_blocks(&nodes[0], 2); + connect_blocks(&nodes[1], 2); + let (preimage_b, payment_hash_b, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + let payment_hash_c = if use_third_htlc { + route_payment(&nodes[0], &[&nodes[1]], 100_000_000).1 + } else { + PaymentHash([0; 32]) + }; + + // First disconnect peers so that we don't have to deal with messages: + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + // Give node B preimages so that it will claim the first two HTLCs on-chain. + nodes[1].node.claim_funds(preimage_a); + expect_payment_claimed!(nodes[1], payment_hash_a, 100_000_000); + nodes[1].node.claim_funds(preimage_b); + expect_payment_claimed!(nodes[1], payment_hash_b, 100_000_000); + check_added_monitors(&nodes[1], 2); + + let err = "Channel force-closed".to_string(); + + // Force-close and fetch node B's commitment transaction and the transaction claiming the first + // two HTLCs. + nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap(); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 10_000_000); + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + + mine_transaction(&nodes[0], &commitment_tx); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 10_000_000); + check_added_monitors(&nodes[0], 1); + + mine_transaction(&nodes[1], &commitment_tx); + let mut bump_events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(bump_events.len(), 1); + match bump_events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + ev => panic!("Unexpected event {ev:?}"), + } + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + if nodes[1].connect_style.borrow().updates_best_block_first() { + assert_eq!(txn.len(), 2, "{txn:?}"); + check_spends!(txn[0], funding_tx); + } else { + assert_eq!(txn.len(), 1, "{txn:?}"); + } + let bs_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(bs_htlc_spend_tx, commitment_tx, coinbase_tx); + + // Now connect blocks until the first HTLC expires + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + connect_blocks(&nodes[0], TEST_FINAL_CLTV - 2); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let as_first_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_first_htlc_spend_tx, commitment_tx); + + // But confirm B's dual-HTLC-claim transaction instead. A should now have nothing to broadcast + // as the third HTLC (if there is one) won't expire for another block. + mine_transaction(&nodes[0], &bs_htlc_spend_tx); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + + let sent_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(sent_events.len(), 4, "{sent_events:?}"); + let mut found_expected_events = [false, false, false, false]; + for event in sent_events { + match event { + Event::PaymentSent { payment_hash, .. }|Event::PaymentPathSuccessful { payment_hash: Some(payment_hash), .. } => { + let path_success = matches!(event, Event::PaymentPathSuccessful { .. 
}); + if payment_hash == payment_hash_a { + found_expected_events[0 + if path_success { 1 } else { 0 }] = true; + } else if payment_hash == payment_hash_b { + found_expected_events[2 + if path_success { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true, true, true]); + + // However if we connect one more block the third HTLC will time out and A should claim it + connect_blocks(&nodes[0], 1); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + if use_third_htlc { + assert_eq!(txn.len(), 1); + let as_third_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_third_htlc_spend_tx, commitment_tx); + // Previously, node A would generate a bogus claim here, trying to claim both HTLCs B and C in + // one transaction, so we check that the single input being spent was not already spent in node + // B's HTLC claim transaction. + assert_eq!(as_third_htlc_spend_tx.input.len(), 1, "{as_third_htlc_spend_tx:?}"); + for spent_input in bs_htlc_spend_tx.input.iter() { + let third_htlc_vout = as_third_htlc_spend_tx.input[0].previous_output.vout; + assert_ne!(third_htlc_vout, spent_input.previous_output.vout); + } + + mine_transaction(&nodes[0], &as_third_htlc_spend_tx); + + assert_eq!(&nodes[0].node.get_and_clear_pending_events(), &[]); + } else { + assert_eq!(txn.len(), 0); + // Connect a block so that both cases end with the same height + connect_blocks(&nodes[0], 1); + } + + // At this point all HTLCs have been resolved and no further transactions should be generated. + // We connect blocks until one block before `bs_htlc_spend_tx` reaches `ANTI_REORG_DELAY` + // confirmations. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 4); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + if reorg_out { + // Reorg out bs_htlc_spend_tx, letting node A claim all the HTLCs instead. + disconnect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + + // As soon as bs_htlc_spend_tx is disconnected, node A should consider all HTLCs + // claimable-on-timeout. + disconnect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 3 } else { 2 }); + for balance in balances { + if let Balance::MaybeTimeoutClaimableHTLC { .. } = balance { + } else { + panic!("Unexpected balance {balance:?}"); + } + } + + connect_blocks(&nodes[0], 100); + let txn = nodes[0].tx_broadcaster.txn_broadcast(); + let mut claiming_outpoints = new_hash_set(); + for tx in txn.iter() { + for input in tx.input.iter() { + claiming_outpoints.insert(input.previous_output); + } + } + assert_eq!(claiming_outpoints.len(), if use_third_htlc { 3 } else { 2 }); + } else { + // Connect a final block, which puts `bs_htlc_spend_tx` at `ANTI_REORG_DELAY` and we wipe + // the claimable balances for the first two HTLCs. + connect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 1 } else { 0 }); + + // Connect two more blocks to get `as_third_htlc_spend_tx` to `ANTI_REORG_DELAY` confs. 
+ connect_blocks(&nodes[0], 2); + if use_third_htlc { + let failed_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(failed_events.len(), 2); + let mut found_expected_events = [false, false]; + for event in failed_events { + match event { + Event::PaymentFailed { payment_hash: Some(payment_hash), .. }|Event::PaymentPathFailed { payment_hash, .. } => { + let path_failed = matches!(event, Event::PaymentPathFailed { .. }); + if payment_hash == payment_hash_c { + found_expected_events[if path_failed { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true]); + } + + // Further, there should be no spendable balances. + assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + } +} + +#[test] +fn test_split_htlc_expiry_tracking() { + do_test_split_htlc_expiry_tracking(true, true); + do_test_split_htlc_expiry_tracking(false, true); + do_test_split_htlc_expiry_tracking(true, false); + do_test_split_htlc_expiry_tracking(false, false); +} diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index fc097d5f915..7eb82722134 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1779,7 +1779,7 @@ where } fn test_node_counter_consistency(&self) { - #[cfg(debug_assertions)] + #[cfg(test)] { let channels = self.channels.read().unwrap(); let nodes = self.nodes.read().unwrap(); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c09a014dc62..33ad0f936ba 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1161,18 +1161,17 @@ impl_writeable_tlv_based!(RouteHintHop, { }); #[derive(Eq, PartialEq)] -#[repr(align(64))] // Force the size to 64 bytes +#[repr(align(32))] // Force the size to 32 bytes struct RouteGraphNode { - node_id: NodeId, node_counter: u32, - score: u64, + score: u128, // The maximum value a yet-to-be-constructed payment path might flow through this node. // This value is upper-bounded by us by: // - how much is needed for a path being constructed // - how much value can channels following this node (up to the destination) can contribute, // considering their capacity and fees value_contribution_msat: u64, - total_cltv_delta: u32, + total_cltv_delta: u16, /// The number of hops walked up to this node. path_length_to_node: u8, } @@ -1193,9 +1192,8 @@ impl cmp::PartialOrd for RouteGraphNode { } // While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved -// substantially when it is laid out at exactly 64 bytes. -const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::(); -const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::() - 64; +// substantially when it is laid out at exactly 32 bytes. +const _GRAPH_NODE_32: () = assert!(core::mem::size_of::() == 32); /// A [`CandidateRouteHop::FirstHop`] entry. #[derive(Clone, Debug)] @@ -1879,6 +1877,22 @@ impl<'a> PaymentPath<'a> { return result; } + /// Gets the cost (fees plus scorer penalty in msats) of the path divided by the value we + /// can/will send over the path. This is also the heap score during our Dijkstra's walk. 
+ fn get_cost_per_msat(&self) -> u128 { + let fee_cost = self.get_cost_msat(); + let value_msat = self.get_value_msat(); + debug_assert!(value_msat > 0, "Paths should always send more than 0 msat"); + if fee_cost == u64::MAX || value_msat == 0 { + u64::MAX.into() + } else { + // In order to avoid integer division precision loss, we simply shift the costs up to + // the top half of a u128 and divide by the value (which is, at max, just under a u64). + ((fee_cost as u128) << 64) / value_msat as u128 + } + } + + /// Gets the fees plus scorer penalty in msats of the path. fn get_cost_msat(&self) -> u64 { self.get_total_fee_paid_msat().saturating_add(self.get_path_penalty_msat()) } @@ -2442,6 +2456,16 @@ where L::Target: Logger { // drop the requirement by setting this to 0. let mut channel_saturation_pow_half = payment_params.max_channel_saturation_power_of_half; + // In order to already account for some of the privacy enhancing random CLTV + // expiry delta offset we add on top later, we subtract a rough estimate + // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here. + let max_total_cltv_expiry_delta: u16 = + (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) + .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) + .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) + .try_into() + .unwrap_or(u16::MAX); + // Keep track of how much liquidity has been used in selected channels or blinded paths. Used to // determine if the channel can be used by additional MPP paths or to inform path finding // decisions. It is aware of direction *only* to ensure that the correct htlc_maximum_msat value @@ -2523,25 +2547,19 @@ where L::Target: Logger { *used_liquidity_msat }); - // Verify the liquidity offered by this channel complies to the minimal contribution. - let contributes_sufficient_value = available_value_contribution_msat >= minimal_value_contribution_msat; // Do not consider candidate hops that would exceed the maximum path length. let path_length_to_node = $next_hops_path_length + if $candidate.blinded_hint_idx().is_some() { 0 } else { 1 }; let exceeds_max_path_length = path_length_to_node > max_path_length; // Do not consider candidates that exceed the maximum total cltv expiry limit. - // In order to already account for some of the privacy enhancing random CLTV - // expiry delta offset we add on top later, we subtract a rough estimate - // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here. - let max_total_cltv_expiry_delta = (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) - .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) - .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta); let hop_total_cltv_delta = ($next_hops_cltv_delta as u32) .saturating_add(cltv_expiry_delta); - let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta; + let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta as u32; let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution); + // Verify the liquidity offered by this channel complies to the minimal contribution. + let contributes_sufficient_value = value_contribution_msat >= minimal_value_contribution_msat; // Includes paying fees for the use of the following channels. 
let amount_to_transfer_over_msat: u64 = match value_contribution_msat.checked_add($next_hops_fee_msat) { Some(result) => result, @@ -2691,7 +2709,7 @@ where L::Target: Logger { // Ignore hops if augmenting the current path to them would put us over `max_total_routing_fee_msat` if total_fee_msat > max_total_routing_fee_msat { if should_log_candidate { - log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate)); + log_trace!(logger, "Ignoring {} with fee {total_fee_msat} due to exceeding max total routing fee limit {max_total_routing_fee_msat}.", LoggedCandidateHop(&$candidate)); if let Some(_) = first_hop_details { log_trace!(logger, @@ -2732,25 +2750,41 @@ where L::Target: Logger { // but it may require additional tracking - we don't want to double-count // the fees included in $next_hops_path_htlc_minimum_msat, but also // can't use something that may decrease on future hops. - let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) + let old_fee_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) .saturating_add(old_entry.path_penalty_msat); - let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) + let new_fee_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) .saturating_add(path_penalty_msat); - let should_replace = - new_cost < old_cost - || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); + // The actual score we use for our heap is the cost divided by how + // much we are thinking of sending over this channel. This avoids + // prioritizing channels that have a very low fee because we aren't + // sending very much over them. + // In order to avoid integer division precision loss, we simply + // shift the costs up to the top half of a u128 and divide by the + // value (which is, at max, just under a u64). + let old_cost = if old_fee_cost != u64::MAX && old_entry.value_contribution_msat != 0 { + ((old_fee_cost as u128) << 64) / old_entry.value_contribution_msat as u128 + } else { + u128::MAX + }; + let new_cost = if new_fee_cost != u64::MAX { + // value_contribution_msat is always >= 1, checked above via + // `contributes_sufficient_value`. + ((new_fee_cost as u128) << 64) / value_contribution_msat as u128 + } else { + u128::MAX + }; - if !old_entry.was_processed && should_replace { + if !old_entry.was_processed && new_cost < old_cost { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] { assert!(!old_entry.best_path_from_hop_selected); + assert!(hop_total_cltv_delta <= u16::MAX as u32); } let new_graph_node = RouteGraphNode { - node_id: src_node_id, node_counter: src_node_counter, - score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), - total_cltv_delta: hop_total_cltv_delta, + score: new_cost, + total_cltv_delta: hop_total_cltv_delta as u16, value_contribution_msat, path_length_to_node, }; @@ -2824,7 +2858,7 @@ where L::Target: Logger { // meaning how much will be paid in fees after this node (to the best of our knowledge). // This data can later be helpful to optimize routing (pay lower fees). macro_rules! 
add_entries_to_cheapest_to_target_node { - ( $node: expr, $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, + ( $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; @@ -2880,7 +2914,7 @@ where L::Target: Logger { } } - if let Some(node) = $node { + if let Some(node) = network_nodes.get(&$node_id) { let features = if let Some(node_info) = node.announcement_info.as_ref() { &node_info.features() } else { @@ -3007,7 +3041,7 @@ where L::Target: Logger { entry.value_contribution_msat = path_value_msat; } add_entries_to_cheapest_to_target_node!( - network_nodes.get(&payee), payee_node_counter, payee, path_value_msat, 0, 0 + payee_node_counter, payee, path_value_msat, 0, 0 ); } @@ -3082,11 +3116,11 @@ where L::Target: Logger { // Both these cases (and other cases except reaching recommended_value_msat) mean that // paths_collection will be stopped because found_new_path==false. // This is not necessarily a routing failure. - 'path_construction: while let Some(RouteGraphNode { node_id, node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { + 'path_construction: while let Some(RouteGraphNode { node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. - if node_id == our_node_id { + if node_counter == payer_node_counter { let mut new_entry = dist[payer_node_counter as usize].take().unwrap(); let mut ordered_hops: Vec<(PathBuildingHop, NodeFeatures)> = vec!((new_entry.clone(), default_node_features.clone())); @@ -3209,13 +3243,20 @@ where L::Target: Logger { // If we found a path back to the payee, we shouldn't try to process it again. This is // the equivalent of the `elem.was_processed` check in // add_entries_to_cheapest_to_target_node!() (see comment there for more info). - if node_id == maybe_dummy_payee_node_id { continue 'path_construction; } + if node_counter == payee_node_counter { continue 'path_construction; } + + let node_id = if let Some(entry) = &dist[node_counter as usize] { + entry.candidate.source() + } else { + debug_assert!(false, "Best nodes in the heap should have entries in dist"); + continue 'path_construction; + }; // Otherwise, since the current target node is not us, // keep "unrolling" the payment graph from payee to payer by // finding a way to reach the current target from the payer side. add_entries_to_cheapest_to_target_node!( - network_nodes.get(&node_id), node_counter, node_id, + node_counter, node_id, value_contribution_msat, total_cltv_delta, path_length_to_node ); @@ -3290,10 +3331,7 @@ where L::Target: Logger { // First, sort by the cost-per-value of the path, dropping the paths that cost the most for // the value they contribute towards the payment amount. // We sort in descending order as we will remove from the front in `retain`, next. - selected_route.sort_unstable_by(|a, b| - (((b.get_cost_msat() as u128) << 64) / (b.get_value_msat() as u128)) - .cmp(&(((a.get_cost_msat() as u128) << 64) / (a.get_value_msat() as u128))) - ); + selected_route.sort_unstable_by(|a, b| b.get_cost_per_msat().cmp(&a.get_cost_per_msat())); // We should make sure that at least 1 path left. 
let mut paths_left = selected_route.len(); @@ -8645,6 +8683,207 @@ mod tests { assert_eq!(route.paths[0].hops[0].short_channel_id, 44); } + + #[test] + fn prefers_paths_by_cost_amt_ratio() { + // Previously, we preferred paths during MPP selection based on their absolute cost, rather + // than the cost-per-amount-transferred. This could result in selecting many MPP paths with + // relatively low value contribution, rather than one large path which is ultimately + // cheaper. While this is a tradeoff (and not universally better), in practice the old + // behavior was problematic, so we shifted to a proportional cost. + // + // Here we check that the proportional cost is being used in a somewhat absurd setup where + // we have one good path and several cheaper, but smaller paths. + let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); + let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); + let scorer = ln_test_utils::TestScorer::new(); + let random_seed_bytes = [42; 32]; + + // Enable channel 1 + let update_1 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 1, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (1 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &our_privkey, update_1); + + // Set the fee on channel 3 to 1 sat, max HTLC to 1M msat + let update_3 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 3, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (3 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[0], update_3); + + // Set the fee on channel 13 to 1 sat, max HTLC to 1M msat + let update_13 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 13, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (13 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_13); + + // Set the fee on channel 4 to 1 sat, max HTLC to 1M msat + let update_4 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 4, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (4 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_4); + + // The router will attempt to gather 3x the requested amount, and if it finds the new path + // through channel 16, added below, it'll always prefer that, even prior to the changes + // which introduced this test. + // Instead, we add 6 additional channels so that the pathfinder always just gathers useless + // paths first. + for i in 0..6 { + // Finally, create a single channel with fee of 2 sat from node 1 to node 2 which allows + // for a larger payment. 
+ let chan_features = ChannelFeatures::from_le_bytes(vec![]); + add_channel(&gossip_sync, &secp_ctx, &privkeys[7], &privkeys[2], chan_features, i + 42); + + // Set the fee on channel 16 to 2 sats, max HTLC to 3M msat + let update_a = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: i + 42, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (42 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_a); + + // Enable channel 16 by providing an update in both directions + let update_b = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: i + 42, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 1, + cltv_expiry_delta: (42 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: u32::MAX, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_b); + } + + // Ensure that we can build a route for 3M msat across the three paths to node 2. + let config = UserConfig::default(); + let mut payment_params = PaymentParameters::from_node_id(nodes[2], 42) + .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) + .unwrap(); + payment_params.max_channel_saturation_power_of_half = 0; + let route_params = + RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); + let route = get_route( + &our_id, + &route_params, + &network_graph.read_only(), + None, + Arc::clone(&logger), + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + assert_eq!(route.paths.len(), 3); + for path in route.paths { + assert_eq!(path.hops.len(), 2); + } + + // Finally, create a single channel with fee of 2 sat from node 1 to node 2 which allows + // for a larger payment. 
+ let features_16 = ChannelFeatures::from_le_bytes(id_to_feature_flags(16)); + add_channel(&gossip_sync, &secp_ctx, &privkeys[1], &privkeys[2], features_16, 16); + + // Set the fee on channel 16 to 2 sats, max HTLC to 3M msat + let update_16_a = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 16, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (16 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 3_000_000, + fee_base_msat: 2_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_16_a); + + // Enable channel 16 by providing an update in both directions + let update_16_b = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 16, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 1, + cltv_expiry_delta: (16 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: u32::MAX, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_16_b); + + // Ensure that we now build a route for 3M msat across just the new path + let route = get_route( + &our_id, + &route_params, + &network_graph.read_only(), + None, + Arc::clone(&logger), + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + assert_eq!(route.paths.len(), 1); + assert_eq!(route.paths[0].hops.len(), 2); + assert_eq!(route.paths[0].hops[1].short_channel_id, 16); + } } #[cfg(any(test, ldk_bench))] diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs index 258652b575d..380f4dbe223 100644 --- a/lightning/src/routing/test_utils.rs +++ b/lightning/src/routing/test_utils.rs @@ -110,7 +110,7 @@ pub(crate) fn update_channel( match gossip_sync.handle_channel_update(Some(node_pubkey), &valid_channel_update) { Ok(res) => assert!(res), - Err(_) => panic!() + Err(e) => panic!("{e:?}") }; }
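
The router refactor above replaces the heap's absolute-fee score with a cost-per-amount ratio: the fee cost is shifted into the upper 64 bits of a u128 before dividing by the value contribution, so the ratio keeps 64 fractional bits instead of rounding to zero. The standalone sketch below is not part of the patch; the function name and the numbers are illustrative, loosely mirroring the patch's get_cost_per_msat and heap-replacement logic.

/// Illustrative sketch (not part of the patch): score a candidate path by
/// cost-per-msat without losing precision to integer division.
///
/// `fee_cost_msat` stands in for `max(total_fee, htlc_minimum) + penalty` in
/// the router; `value_msat` stands in for `value_contribution_msat`.
fn cost_per_msat(fee_cost_msat: u64, value_msat: u64) -> u128 {
    if fee_cost_msat == u64::MAX || value_msat == 0 {
        // Treat saturated or degenerate inputs as "infinitely expensive".
        return u128::MAX;
    }
    // Shifting the cost into the top 64 bits of a u128 keeps 64 fractional
    // bits of precision; a plain `fee / value` would round most ratios to 0.
    ((fee_cost_msat as u128) << 64) / value_msat as u128
}

fn main() {
    // 1 msat fee on a 1_000 msat path vs. 2 msat fee on a 1_000_000 msat path:
    // plain integer division would call both ratios 0 and lose the ordering.
    let small_path = cost_per_msat(1, 1_000);
    let large_path = cost_per_msat(2, 1_000_000);
    assert!(large_path < small_path, "larger path is cheaper per msat sent");
    println!("small path score: {small_path}, large path score: {large_path}");
}

A lower score wins in the patched heap comparison, so the larger path is preferred even though its absolute fee is higher, which is exactly the behavior the prefers_paths_by_cost_amt_ratio test exercises.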
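
The diff also swaps the old pair of usize-underflow size guards for a single const assertion pinning RouteGraphNode to 32 bytes. A minimal sketch of that compile-time size-guard pattern follows; HeapEntry is a hypothetical stand-in for RouteGraphNode, and the field set is only meant to land on the same 32-byte layout.

// Minimal sketch of a compile-time size guard, with a hypothetical struct
// standing in for `RouteGraphNode`.
#[repr(align(32))]
struct HeapEntry {
    node_counter: u32,
    score: u128,
    value_contribution_msat: u64,
    total_cltv_delta: u16,
    path_length_to_node: u8,
}

// `assert!` in const context fails the build (not just a test run) if the
// struct ever stops fitting in exactly 32 bytes.
const _HEAP_ENTRY_IS_32_BYTES: () = assert!(core::mem::size_of::<HeapEntry>() == 32);

fn main() {
    println!("HeapEntry is {} bytes", core::mem::size_of::<HeapEntry>());
}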